author     eopXD <yueh.ting.chen@gmail.com>  2023-01-27 01:46:11 -0800
committer  Tobias Hieta <tobias@hieta.se>  2023-02-05 21:10:40 +0100
commit     2b8bbad770c0bb7fa19a43dbf222a1c2e9a6cff9 (patch)
tree       23cd59dc565bfb39756e9db07de93c6421be5b1f
parent     28ed0f0b5907ae5996865fdb0e6f8a99d6de7e2e (diff)
[3/3][Clang][RISCV] Add `__riscv_` for overloaded intrinsics
This commit adds the `__riscv_` prefix to the overloaded RVV intrinsics. It is the 3rd commit of a patch-set that adds `__riscv_` to all RVV intrinsics, following the naming guideline under riscv-c-api-doc, which prescribes the `__riscv_` prefix for all RVV intrinsics.

Pull Requests: riscv-non-isa/riscv-c-api-doc#31, riscv-non-isa/rvv-intrinsic-doc#189

Depends on D142644.

Reviewed By: kito-cheng

Differential Revision: https://reviews.llvm.org/D142697
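For illustration, a minimal sketch of what the rename means for user code. The example below is hypothetical (it is not taken from the patch) and assumes a compiler carrying this patch-set, with the V extension enabled (e.g. -march=rv64gcv):

    #include <stddef.h>
    #include <riscv_vector.h>

    // Overloaded RVV intrinsic spelled with the new `__riscv_` prefix; the
    // overload is resolved from the operand types (vint32m1_t here).
    vint32m1_t add_i32(vint32m1_t a, vint32m1_t b, size_t vl) {
      // Before this patch the overloaded spelling was `vadd(a, b, vl)`.
      return __riscv_vadd(a, b, vl);
    }

The non-overloaded intrinsics received the same prefix in the earlier commits of this patch-set; this commit covers only the overloaded spellings, which is why the touched tests all live under the overloaded/ directories listed below.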
-rw-r--r--  clang/lib/Support/RISCVVIntrinsicUtils.cpp | 1
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaadd.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaaddu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadc.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadd.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vand.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasub.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasubu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c | 118
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpop.c | 28
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdiv.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdivu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfabs.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c | 360
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfdiv.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfirst.c | 28
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c | 30
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c | 30
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c | 384
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfneg.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrdiv.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd.c | 144
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c | 300
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c | 44
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c | 44
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub.c | 144
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vget.c | 132
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16ff.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32.c | 30
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32ff.c | 30
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64ff.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8.c | 28
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8ff.c | 28
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul.c | 540
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei16.c | 228
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei32.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei64.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei8.c | 236
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c | 192
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c | 184
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c | 164
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c | 192
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c | 140
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c | 140
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse32.c | 30
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse64.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse8.c | 28
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16.c | 30
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c | 30
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8.c | 20
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c | 20
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8.c | 20
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c | 20
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c | 30
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c | 20
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c | 20
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c | 6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c | 16
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei16.c | 228
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei32.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei64.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei8.c | 236
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c | 192
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c | 184
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c | 164
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c | 192
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c | 140
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c | 140
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadc.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmand.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmandn.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmax.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmaxu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c | 206
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmin.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vminu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmmv.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnand.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnor.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnot.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmor.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmorn.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbc.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbf.c | 28
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmseq.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsge.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgeu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgt.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgtu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsif.c | 28
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsle.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsleu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmslt.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsltu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsne.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsof.c | 28
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmul.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulh.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhsu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c | 206
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxnor.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxor.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vncvt.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vneg.c | 88
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnot.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsra.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsrl.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vor.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c | 88
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c | 88
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c | 88
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c | 88
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vreinterpret.c | 432
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrem.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vremu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c | 472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgatherei16.c | 228
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrsub.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsbc.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse16.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse32.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse64.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse8.c | 56
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vset.c | 132
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c | 112
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1down.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1up.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c | 236
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c | 236
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsll.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm.c | 14
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei16.c | 228
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei32.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei64.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei8.c | 236
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c | 192
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c | 184
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c | 164
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c | 192
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c | 140
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c | 140
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsra.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsrl.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse16.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse32.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse64.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse8.c | 56
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e16.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e32.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e64.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e8.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e16.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e32.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e64.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e8.c | 40
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e16.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e32.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e64.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e8.c | 40
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e8.c | 32
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e8.c | 32
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e8.c | 32
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e8.c | 32
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e16.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e64.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e8.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e16.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e32.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e64.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e8.c | 40
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e16.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e32.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e64.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e8.c | 40
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e8.c | 32
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e8.c | 32
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e8.c | 32
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e16.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e32.c | 24
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e64.c | 12
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e8.c | 32
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsub.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei16.c | 228
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei32.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei64.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei8.c | 236
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c | 192
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c | 184
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c | 164
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c | 192
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c | 140
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c | 140
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c | 148
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c | 104
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwadd.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwaddu.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvt.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvtu.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmacc.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccsu.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccu.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccus.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmul.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulsu.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulu.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsub.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsubu.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vxor.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c | 112
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaadd.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaaddu.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadc.c | 176
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadd.c | 704
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vand.c | 704
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasub.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasubu.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c | 118
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdiv.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdivu.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfabs.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt.c | 720
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfdiv.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c | 30
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt.c | 768
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfneg.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrdiv.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmax.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmin.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredosum.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredusum.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c | 120
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub.c | 240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd.c | 288
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt.c | 600
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc.c | 144
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac.c | 144
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul.c | 144
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc.c | 144
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac.c | 144
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredosum.c | 44
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredusum.c | 44
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub.c | 288
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vid.c | 132
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/viota.c | 132
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16.c | 108
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16ff.c | 108
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32.c | 90
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32ff.c | 90
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64ff.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8.c | 84
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8ff.c | 84
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei16.c | 456
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei32.c | 416
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei64.c | 352
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei8.c | 472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c | 384
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c | 368
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c | 328
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c | 384
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c | 296
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c | 296
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c | 280
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c | 296
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c | 296
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c | 296
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c | 280
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c | 296
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c | 208
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse16.c | 108
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse32.c | 90
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse64.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse8.c | 84
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16.c | 90
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c | 90
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c | 72
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c | 60
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c | 36
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c | 18
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c | 48
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16.c | 54
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c | 54
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32.c36
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c36
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64.c18
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c18
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8.c48
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c48
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c90
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c72
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c54
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c72
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c72
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c54
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c36
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c60
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c72
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c54
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c36
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c60
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c54
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c36
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c18
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c48
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c54
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c36
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c18
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c48
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c54
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c36
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c18
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c48
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c54
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c36
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c18
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c48
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei16.c456
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei32.c416
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei64.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei8.c472
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c384
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c368
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c328
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c384
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c296
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c296
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c280
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c296
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c296
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c296
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c280
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c296
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c208
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmacc.c704
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmadd.c704
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmax.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmaxu.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c206
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq.c60
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge.c60
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt.c60
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle.c60
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt.c60
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne.c60
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmin.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vminu.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsbf.c14
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmseq.c176
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsge.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgeu.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgt.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgtu.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsif.c14
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsle.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsleu.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmslt.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsltu.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsne.c176
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsof.c14
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmul.c704
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulh.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhsu.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhu.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c294
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclip.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclipu.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vncvt.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vneg.c176
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsac.c704
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsub.c704
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnot.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsra.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsrl.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vor.c704
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredand.c176
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmax.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmaxu.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmin.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredminu.c88
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredor.c176
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredsum.c176
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredxor.c176
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrem.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vremu.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c944
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgatherei16.c456
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrsub.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsadd.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsaddu.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsbc.c176
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext.c224
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1down.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1up.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c472
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslideup.c472
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsll.c704
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsmul.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsra.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsrl.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssra.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssrl.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssub.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssubu.c352
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsub.c704
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwadd.c480
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwaddu.c480
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvt.c120
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvtu.c120
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmacc.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccsu.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccu.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccus.c120
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmul.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulsu.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulu.c240
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsum.c72
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsumu.c72
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsub.c480
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsubu.c480
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vxor.c704
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext.c224
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-overloaded.c120
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vlenb.c2
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64-overloaded.c32
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-overloaded.c144
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64-overloaded.c32
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-overloaded.c144
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64-overloaded.c32
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-overloaded.c144
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vset-overloaded.c120
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c32
766 files changed, 54147 insertions, 54146 deletions
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 3da35d9d2f6b..86da7e86f831 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -1005,6 +1005,7 @@ void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
// This follows the naming guideline under riscv-c-api-doc to add the
// `__riscv_` prefix for all RVV intrinsics.
Name = "__riscv_" + Name;
+ OverloadedName = "__riscv_" + OverloadedName;
if (IsMasked) {
if (PolicyAttrs.isTUMUPolicy())
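For context, a minimal caller-side sketch of what this rename means for user
code (not part of the patch; the function name add_avg and the RV64+V target
are illustrative). With `OverloadedName` now prefixed, the overloaded form is
spelled with the same `__riscv_` prefix as the non-overloaded one, while the
element type and LMUL are still inferred from the operand types:

#include <riscv_vector.h>

// Hypothetical helper, assuming compilation for an RV64 target with the
// V extension (e.g. -march=rv64gcv).
vint32m1_t add_avg(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  // Before this patch the overloaded call was spelled vaadd(op1, op2, vl).
  // The overload is still resolved from the vint32m1_t operand types.
  return __riscv_vaadd(op1, op2, vl);
}

The autogenerated test updates below exercise exactly this spelling change
across every element width, LMUL, and masked/unmasked variant.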
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaadd.c
index d9d51b159546..c64137396d8d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaadd.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vaadd(op1, op2, vl);
+ return __riscv_vaadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vaadd(mask, op1, op2, vl);
+ return __riscv_vaadd(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaaddu.c
index 315a7b66a9d3..d19f71099032 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaaddu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaaddu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vaaddu(op1, op2, vl);
+ return __riscv_vaaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vaaddu(mask, op1, op2, vl);
+ return __riscv_vaaddu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadc.c
index 89cba06bfa54..ccb72a437e43 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadc.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t car
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t carryin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t car
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t carryin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t carryin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t car
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t carryin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t carryin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8(
@@ -408,7 +408,7 @@ vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carr
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carr
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carr
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m1(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m1(
@@ -471,7 +471,7 @@ vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m2(
@@ -480,7 +480,7 @@ vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m2(
@@ -489,7 +489,7 @@ vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m4(
@@ -498,7 +498,7 @@ vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m4(
@@ -507,7 +507,7 @@ vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m8(
@@ -516,7 +516,7 @@ vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m8(
@@ -525,7 +525,7 @@ vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4(
@@ -534,7 +534,7 @@ vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t carr
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m1(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t carr
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m1(
@@ -579,7 +579,7 @@ vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carr
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m2(
@@ -588,7 +588,7 @@ vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t carryin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m2(
@@ -597,7 +597,7 @@ vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carry
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m4(
@@ -606,7 +606,7 @@ vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m4(
@@ -615,7 +615,7 @@ vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carry
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m8(
@@ -624,7 +624,7 @@ vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m8(
@@ -633,7 +633,7 @@ vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carry
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2(
@@ -642,7 +642,7 @@ vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m1(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t carr
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m1(
@@ -669,7 +669,7 @@ vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carr
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m2(
@@ -678,7 +678,7 @@ vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t carryin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m2(
@@ -687,7 +687,7 @@ vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carr
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m4(
@@ -696,7 +696,7 @@ vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t carryin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m4(
@@ -705,7 +705,7 @@ vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carry
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m8(
@@ -714,7 +714,7 @@ vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m8(
@@ -723,7 +723,7 @@ vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carry
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m1(
@@ -732,7 +732,7 @@ vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m1(
@@ -741,7 +741,7 @@ vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carr
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m2(
@@ -750,7 +750,7 @@ vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t carryin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m2(
@@ -759,7 +759,7 @@ vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carr
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m4(
@@ -768,7 +768,7 @@ vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t carryin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m4(
@@ -777,7 +777,7 @@ vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carr
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m8(
@@ -786,7 +786,7 @@ vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t carryin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m8(
@@ -795,6 +795,6 @@ vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carry
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) {
- return vadc(op1, op2, carryin, vl);
+ return __riscv_vadc(op1, op2, carryin, vl);
}
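Note: across these hunks only the callee name changes; the overloaded intrinsics keep their argument order and types under the new `__riscv_` prefix. A minimal caller-side sketch of the migration (the wrapper name `add_with_carry` is hypothetical; the intrinsic name, operand types, and mask type are taken verbatim from the tests above):

#include <stddef.h>
#include <riscv_vector.h>

// Before this change the overloaded call was spelled vadc(...);
// after it, the same overload resolves under __riscv_vadc(...).
vuint32m1_t add_with_carry(vuint32m1_t a, vuint32m1_t b,
                           vbool32_t carryin, size_t vl) {
  return __riscv_vadc(a, b, carryin, vl);  // prefixed overloaded form
}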
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadd.c
index 37b74dc966d4..dd63888815d7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadd.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
@@ -408,7 +408,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
@@ -471,7 +471,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
@@ -480,7 +480,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
@@ -489,7 +489,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
@@ -498,7 +498,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
@@ -507,7 +507,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
@@ -516,7 +516,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
@@ -525,7 +525,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
@@ -534,7 +534,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
@@ -579,7 +579,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
@@ -588,7 +588,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
@@ -597,7 +597,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
@@ -606,7 +606,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
@@ -615,7 +615,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
@@ -624,7 +624,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
@@ -633,7 +633,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
@@ -642,7 +642,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
@@ -669,7 +669,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
@@ -678,7 +678,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
@@ -687,7 +687,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
@@ -696,7 +696,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
@@ -705,7 +705,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
@@ -714,7 +714,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
@@ -723,7 +723,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
@@ -732,7 +732,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
@@ -741,7 +741,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
@@ -750,7 +750,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
@@ -759,7 +759,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
@@ -768,7 +768,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
@@ -777,7 +777,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
@@ -786,7 +786,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
@@ -795,7 +795,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vadd(op1, op2, vl);
+ return __riscv_vadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m(
@@ -804,7 +804,7 @@ vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m(
@@ -813,7 +813,7 @@ vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m(
@@ -822,7 +822,7 @@ vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m(
@@ -831,7 +831,7 @@ vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m(
@@ -840,7 +840,7 @@ vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m(
@@ -849,7 +849,7 @@ vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
@@ -858,7 +858,7 @@ vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m(
@@ -867,7 +867,7 @@ vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m(
@@ -876,7 +876,7 @@ vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m(
@@ -885,7 +885,7 @@ vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m(
@@ -894,7 +894,7 @@ vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m(
@@ -903,7 +903,7 @@ vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m(
@@ -912,7 +912,7 @@ vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m(
@@ -921,7 +921,7 @@ vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m(
@@ -930,7 +930,7 @@ vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m(
@@ -939,7 +939,7 @@ vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m(
@@ -948,7 +948,7 @@ vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m(
@@ -957,7 +957,7 @@ vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m(
@@ -966,7 +966,7 @@ vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m(
@@ -975,7 +975,7 @@ vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m(
@@ -984,7 +984,7 @@ vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m(
@@ -993,7 +993,7 @@ vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vadd(mask, op1, op2, vl);
+ return __riscv_vadd(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vand.c
index f878ef9c1a79..72ea90576b08 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vand.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
@@ -408,7 +408,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m1(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m1(
@@ -471,7 +471,7 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m2(
@@ -480,7 +480,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m2(
@@ -489,7 +489,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m4(
@@ -498,7 +498,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m4(
@@ -507,7 +507,7 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m8(
@@ -516,7 +516,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m8(
@@ -525,7 +525,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
@@ -534,7 +534,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m1(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m1(
@@ -579,7 +579,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m2(
@@ -588,7 +588,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m2(
@@ -597,7 +597,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m4(
@@ -606,7 +606,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m4(
@@ -615,7 +615,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m8(
@@ -624,7 +624,7 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m8(
@@ -633,7 +633,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
@@ -642,7 +642,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m1(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m1(
@@ -669,7 +669,7 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m2(
@@ -678,7 +678,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m2(
@@ -687,7 +687,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m4(
@@ -696,7 +696,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m4(
@@ -705,7 +705,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m8(
@@ -714,7 +714,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m8(
@@ -723,7 +723,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m1(
@@ -732,7 +732,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m1(
@@ -741,7 +741,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m2(
@@ -750,7 +750,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m2(
@@ -759,7 +759,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m4(
@@ -768,7 +768,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m4(
@@ -777,7 +777,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m8(
@@ -786,7 +786,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m8(
@@ -795,7 +795,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vand(op1, op2, vl);
+ return __riscv_vand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
@@ -804,7 +804,7 @@ vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m(
@@ -813,7 +813,7 @@ vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m(
@@ -822,7 +822,7 @@ vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m(
@@ -831,7 +831,7 @@ vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m(
@@ -840,7 +840,7 @@ vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m(
@@ -849,7 +849,7 @@ vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m1_m(
@@ -858,7 +858,7 @@ vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m1_m(
@@ -867,7 +867,7 @@ vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m2_m(
@@ -876,7 +876,7 @@ vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m2_m(
@@ -885,7 +885,7 @@ vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m4_m(
@@ -894,7 +894,7 @@ vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m4_m(
@@ -903,7 +903,7 @@ vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m8_m(
@@ -912,7 +912,7 @@ vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m8_m(
@@ -921,7 +921,7 @@ vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m(
@@ -930,7 +930,7 @@ vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m(
@@ -939,7 +939,7 @@ vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m(
@@ -948,7 +948,7 @@ vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m(
@@ -957,7 +957,7 @@ vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m1_m(
@@ -966,7 +966,7 @@ vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m1_m(
@@ -975,7 +975,7 @@ vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m2_m(
@@ -984,7 +984,7 @@ vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m2_m(
@@ -993,7 +993,7 @@ vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m4_m(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m4_m(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m8_m(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m8_m(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m1_m(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m1_m(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m2_m(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m2_m(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m4_m(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m4_m(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m8_m(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m8_m(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m1_m(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m1_m(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m2_m(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m2_m(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m4_m(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m4_m(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m8_m(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m8_m(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m1_m(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m1_m(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m2_m(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m2_m(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m4_m(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m4_m(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m8_m(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m8_m(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m1_m(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m1_m(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m2_m(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m2_m(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m4_m(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m4_m(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m8_m(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m8_m(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m1_m(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m1_m(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m2_m(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m2_m(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m4_m(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m4_m(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m8_m(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m8_m(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m1_m(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m1_m(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m2_m(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m2_m(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m4_m(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vand(mask, op1, op2, vl);
+ return __riscv_vand(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasub.c
index f98255c84622..36fb4bcae608 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vasub(op1, op2, vl);
+ return __riscv_vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vasub(mask, op1, op2, vl);
+ return __riscv_vasub(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasubu.c
index 4ea91565cb54..9bbe83e8badc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasubu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasubu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vasubu(op1, op2, vl);
+ return __riscv_vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vasubu(mask, op1, op2, vl);
+ return __riscv_vasubu(mask, op1, op2, vl);
}
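For reference, a minimal caller-side sketch of the renamed overloaded form, mirroring the (op1, op2, vl) signature exercised in the tests above; the wrapper name is an illustrative assumption, not part of this patch:

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical wrapper: averaging subtract of two unsigned vectors via the
// overloaded intrinsic, now spelled with the __riscv_ prefix.
static inline vuint32m1_t avg_sub_u32(vuint32m1_t a, vuint32m1_t b, size_t vl) {
  return __riscv_vasubu(a, b, vl); // previously: vasubu(a, b, vl)
}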
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
index a80b8644a072..ea67a8c5c2a8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t src, vbool64_t mask, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t src, vbool32_t mask, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t src, vbool16_t mask, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t src, vbool8_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t src, vbool4_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t src, vbool4_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t src, vbool2_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t src, vbool2_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t src, vbool64_t mask, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t src, vbool32_t mask, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t src, vbool16_t mask, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t src, vbool8_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t src, vbool4_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t src, vbool4_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t src, vbool64_t mask, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t src, vbool32_t mask, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t src, vbool16_t mask, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t src, vbool8_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4(
@@ -157,7 +157,7 @@ vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t src, vbool64_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2(
@@ -166,7 +166,7 @@ vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t src, vbool32_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1(
@@ -175,7 +175,7 @@ vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t src, vbool16_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vcompress_vm_i8m1(vint8m1_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2(
@@ -184,7 +184,7 @@ vint8m1_t test_vcompress_vm_i8m1(vint8m1_t src, vbool8_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vcompress_vm_i8m2(vint8m2_t src, vbool4_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4(
@@ -193,7 +193,7 @@ vint8m2_t test_vcompress_vm_i8m2(vint8m2_t src, vbool4_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vcompress_vm_i8m4(vint8m4_t src, vbool2_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8(
@@ -202,7 +202,7 @@ vint8m4_t test_vcompress_vm_i8m4(vint8m4_t src, vbool2_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vcompress_vm_i8m8(vint8m8_t src, vbool1_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4(
@@ -211,7 +211,7 @@ vint8m8_t test_vcompress_vm_i8m8(vint8m8_t src, vbool1_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2(
@@ -220,7 +220,7 @@ vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t src, vbool64_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1(
@@ -229,7 +229,7 @@ vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t src, vbool32_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vcompress_vm_i16m1(vint16m1_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2(
@@ -238,7 +238,7 @@ vint16m1_t test_vcompress_vm_i16m1(vint16m1_t src, vbool16_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vcompress_vm_i16m2(vint16m2_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4(
@@ -247,7 +247,7 @@ vint16m2_t test_vcompress_vm_i16m2(vint16m2_t src, vbool8_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vcompress_vm_i16m4(vint16m4_t src, vbool4_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8(
@@ -256,7 +256,7 @@ vint16m4_t test_vcompress_vm_i16m4(vint16m4_t src, vbool4_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vcompress_vm_i16m8(vint16m8_t src, vbool2_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2(
@@ -265,7 +265,7 @@ vint16m8_t test_vcompress_vm_i16m8(vint16m8_t src, vbool2_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1(
@@ -274,7 +274,7 @@ vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t src, vbool64_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vcompress_vm_i32m1(vint32m1_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vcompress_vm_i32m1(vint32m1_t src, vbool32_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vcompress_vm_i32m2(vint32m2_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4(
@@ -292,7 +292,7 @@ vint32m2_t test_vcompress_vm_i32m2(vint32m2_t src, vbool16_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vcompress_vm_i32m4(vint32m4_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8(
@@ -301,7 +301,7 @@ vint32m4_t test_vcompress_vm_i32m4(vint32m4_t src, vbool8_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vcompress_vm_i32m8(vint32m8_t src, vbool4_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1(
@@ -310,7 +310,7 @@ vint32m8_t test_vcompress_vm_i32m8(vint32m8_t src, vbool4_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vcompress_vm_i64m1(vint64m1_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2(
@@ -319,7 +319,7 @@ vint64m1_t test_vcompress_vm_i64m1(vint64m1_t src, vbool64_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vcompress_vm_i64m2(vint64m2_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4(
@@ -328,7 +328,7 @@ vint64m2_t test_vcompress_vm_i64m2(vint64m2_t src, vbool32_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vcompress_vm_i64m4(vint64m4_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8(
@@ -337,7 +337,7 @@ vint64m4_t test_vcompress_vm_i64m4(vint64m4_t src, vbool16_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vcompress_vm_i64m8(vint64m8_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8(
@@ -346,7 +346,7 @@ vint64m8_t test_vcompress_vm_i64m8(vint64m8_t src, vbool8_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t src, vbool64_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t src, vbool32_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t src, vbool16_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2(
@@ -382,7 +382,7 @@ vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t src, vbool8_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t src, vbool4_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4(
@@ -391,7 +391,7 @@ vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t src, vbool4_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t src, vbool2_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8(
@@ -400,7 +400,7 @@ vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t src, vbool2_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t src, vbool1_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4(
@@ -409,7 +409,7 @@ vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t src, vbool1_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t src, vbool64_t mask, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t src, vbool32_t mask, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2(
@@ -436,7 +436,7 @@ vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t src, vbool16_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4(
@@ -445,7 +445,7 @@ vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t src, vbool8_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t src, vbool4_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8(
@@ -454,7 +454,7 @@ vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t src, vbool4_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t src, vbool2_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2(
@@ -463,7 +463,7 @@ vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t src, vbool2_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t src, vbool64_t mask, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2(
@@ -481,7 +481,7 @@ vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t src, vbool32_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4(
@@ -490,7 +490,7 @@ vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t src, vbool16_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8(
@@ -499,7 +499,7 @@ vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t src, vbool8_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t src, vbool4_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1(
@@ -508,7 +508,7 @@ vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t src, vbool4_t mask, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t src, vbool64_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2(
@@ -517,7 +517,7 @@ vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t src, vbool64_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t src, vbool32_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4(
@@ -526,7 +526,7 @@ vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t src, vbool32_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t src, vbool16_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8(
@@ -535,6 +535,6 @@ vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t src, vbool16_t mask, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) {
- return vcompress(src, mask, vl);
+ return __riscv_vcompress(src, mask, vl);
}
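Similarly, a hedged sketch of calling the prefixed overloaded vcompress, matching the (src, mask, vl) shape used in the tests above; the wrapper name is hypothetical:

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical wrapper: pack the lanes of `src` selected by `mask` toward
// element 0, using the overloaded intrinsic under its __riscv_ prefix.
static inline vfloat32m1_t pack_selected_f32(vfloat32m1_t src, vbool32_t mask,
                                             size_t vl) {
  return __riscv_vcompress(src, mask, vl); // previously: vcompress(src, mask, vl)
}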
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpop.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpop.c
index 6a90c2861784..eb2227ff5932 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpop.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpop.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) {
- return vcpop(op1, vl);
+ return __riscv_vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b2(
@@ -22,7 +22,7 @@ unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) {
- return vcpop(op1, vl);
+ return __riscv_vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b4(
@@ -31,7 +31,7 @@ unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) {
- return vcpop(op1, vl);
+ return __riscv_vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b8(
@@ -40,7 +40,7 @@ unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) {
- return vcpop(op1, vl);
+ return __riscv_vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b16(
@@ -49,7 +49,7 @@ unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) {
- return vcpop(op1, vl);
+ return __riscv_vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b32(
@@ -58,7 +58,7 @@ unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) {
- return vcpop(op1, vl);
+ return __riscv_vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b64(
@@ -67,7 +67,7 @@ unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) {
- return vcpop(op1, vl);
+ return __riscv_vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b1_m(
@@ -76,7 +76,7 @@ unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
- return vcpop(mask, op1, vl);
+ return __riscv_vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b2_m(
@@ -85,7 +85,7 @@ unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
- return vcpop(mask, op1, vl);
+ return __riscv_vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b4_m(
@@ -94,7 +94,7 @@ unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
- return vcpop(mask, op1, vl);
+ return __riscv_vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b8_m(
@@ -103,7 +103,7 @@ unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
- return vcpop(mask, op1, vl);
+ return __riscv_vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b16_m(
@@ -112,7 +112,7 @@ unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
- return vcpop(mask, op1, vl);
+ return __riscv_vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b32_m(
@@ -121,7 +121,7 @@ unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
- return vcpop(mask, op1, vl);
+ return __riscv_vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b64_m(
@@ -130,6 +130,6 @@ unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
- return vcpop(mask, op1, vl);
+ return __riscv_vcpop(mask, op1, vl);
}
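And a sketch for the prefixed overloaded vcpop, covering both the unmasked and masked shapes tested above; the wrapper names are hypothetical:

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical wrappers: count set mask bits, unmasked and under a mask.
static inline unsigned long popcount_b8(vbool8_t m, size_t vl) {
  return __riscv_vcpop(m, vl); // previously: vcpop(m, vl)
}
static inline unsigned long popcount_b8_m(vbool8_t mask, vbool8_t m, size_t vl) {
  return __riscv_vcpop(mask, m, vl); // previously: vcpop(mask, m, vl)
}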
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdiv.c
index 701323c1a5a2..7d10e20a0654 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdiv.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vdiv(op1, op2, vl);
+ return __riscv_vdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vdiv(mask, op1, op2, vl);
+ return __riscv_vdiv(mask, op1, op2, vl);
}
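As a quick illustration of the renamed overloaded API exercised by the vdiv tests above, here is a minimal sketch (not part of this patch; it assumes <riscv_vector.h>, a build with -march=rv64gcv, and the prefixed splat intrinsic __riscv_vmv_v_x_i32m1 from the same naming change):

#include <riscv_vector.h>

// Minimal sketch: the overloaded __riscv_vdiv resolves on its operand
// types, here the vector-vector i32m1 form covered by the tests above.
vint32m1_t halve(vint32m1_t v, size_t vl) {
  vint32m1_t two = __riscv_vmv_v_x_i32m1(2, vl);  // splat the divisor
  return __riscv_vdiv(v, two, vl);                // previously: vdiv(v, two, vl)
}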
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdivu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdivu.c
index 94b0f3011096..40ceae117c3c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdivu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdivu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vdivu(op1, op2, vl);
+ return __riscv_vdivu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vdivu(mask, op1, op2, vl);
+ return __riscv_vdivu(mask, op1, op2, vl);
}
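Similarly, a hedged sketch of the masked overloaded form covered by the _m tests above (same assumptions as before; __riscv_vmsne is assumed available as the prefixed overloaded compare):

#include <riscv_vector.h>

// Sketch only: __riscv_vdivu selects the masked overload because the
// first argument is a mask; lanes where den == 0 stay inactive.
vuint32m1_t safe_divu(vuint32m1_t num, vuint32m1_t den, size_t vl) {
  vbool32_t nonzero = __riscv_vmsne(den, (uint32_t)0, vl);  // den != 0
  return __riscv_vdivu(nonzero, num, den, vl);  // previously: vdivu(mask, ...)
}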
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfabs.c
index 21a328b6485e..df59f5b5caca 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfabs.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfabs_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfabs_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfabs_v_f16m1(vfloat16m1_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfabs_v_f16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfabs_v_f16m2(vfloat16m2_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfabs_v_f16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfabs_v_f16m4(vfloat16m4_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfabs_v_f16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfabs_v_f16m8(vfloat16m8_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfabs_v_f16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2(vfloat32m2_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfabs_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4(vfloat32m4_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfabs_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8(vfloat32m8_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfabs_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1(vfloat64m1_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfabs_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2(vfloat64m2_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfabs_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4(vfloat64m4_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfabs_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t op1, size_t vl) {
- return vfabs(op1, vl);
+ return __riscv_vfabs(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_m(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_m(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m2_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m4_m(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m8_m(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
- return vfabs(mask, op1, vl);
+ return __riscv_vfabs(mask, op1, vl);
}
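A short usage sketch of the prefixed vfabs overloads exercised above (riscv_vector.h assumed; the helper name is hypothetical):

#include <riscv_vector.h>

// The unmasked form takes (op1, vl); the masked form prepends a vbool mask,
// exactly as in the test signatures above.
static vfloat32m1_t abs_masked(vbool32_t mask, vfloat32m1_t v, size_t vl) {
  vfloat32m1_t a = __riscv_vfabs(v, vl);     // unmasked overload
  return __riscv_vfabs(mask, a, vl);         // masked overload
}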
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
index b54a2947efcd..22d013f9a41d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfadd(op1, op2, vl);
+ return __riscv_vfadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfadd(mask, op1, op2, vl);
+ return __riscv_vfadd(mask, op1, op2, vl);
}
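The vfadd overloads above resolve between the vector-vector and vector-scalar forms from the type of the second operand; a hedged sketch under the same assumptions (riscv_vector.h header; hypothetical helper name):

#include <riscv_vector.h>

// vv form when op2 is a vector, vf form when op2 is a floating scalar.
static vfloat64m1_t add_then_bias(vfloat64m1_t a, vfloat64m1_t b,
                                  double bias, size_t vl) {
  vfloat64m1_t sum = __riscv_vfadd(a, b, vl);  // vector-vector overload
  return __riscv_vfadd(sum, bias, vl);         // vector-scalar overload
}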
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass.c
index 3f9b6bbbdf4c..76f6445e9c4e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2(
@@ -22,7 +22,7 @@ vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1(
@@ -31,7 +31,7 @@ vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2(
@@ -40,7 +40,7 @@ vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4(
@@ -49,7 +49,7 @@ vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8(
@@ -58,7 +58,7 @@ vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
@@ -67,7 +67,7 @@ vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
@@ -76,7 +76,7 @@ vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
@@ -85,7 +85,7 @@ vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
@@ -94,7 +94,7 @@ vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
@@ -103,7 +103,7 @@ vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
@@ -112,7 +112,7 @@ vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
@@ -121,7 +121,7 @@ vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
@@ -130,7 +130,7 @@ vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
@@ -139,7 +139,7 @@ vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
- return vfclass(op1, vl);
+ return __riscv_vfclass(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m(
@@ -148,7 +148,7 @@ vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m(
@@ -157,7 +157,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m(
@@ -166,7 +166,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m(
@@ -175,7 +175,7 @@ vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m(
@@ -184,7 +184,7 @@ vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m(
@@ -193,7 +193,7 @@ vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
@@ -202,7 +202,7 @@ vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
@@ -211,7 +211,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
@@ -220,7 +220,7 @@ vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
@@ -229,7 +229,7 @@ vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
@@ -238,7 +238,7 @@ vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
@@ -247,7 +247,7 @@ vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
@@ -256,7 +256,7 @@ vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
@@ -265,7 +265,7 @@ vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
@@ -274,6 +274,6 @@ vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
- return vfclass(mask, op1, vl);
+ return __riscv_vfclass(mask, op1, vl);
}
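vfclass maps a float vector to a same-LMUL unsigned vector of classification bits; a minimal sketch under the same assumptions (riscv_vector.h header; hypothetical helper name):

#include <riscv_vector.h>

// Unmasked form is __riscv_vfclass(v, vl); the masked form prepends the
// mask, matching the test signatures above.
static vuint32m1_t classify_masked(vbool32_t mask, vfloat32m1_t v, size_t vl) {
  return __riscv_vfclass(mask, v, vl);
}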
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c
index a430cd24c32e..56574b76ee7e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
@@ -22,7 +22,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2(
@@ -31,7 +31,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
@@ -40,7 +40,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1(
@@ -49,7 +49,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1(
@@ -58,7 +58,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2(
@@ -67,7 +67,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2(
@@ -76,7 +76,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4(
@@ -85,7 +85,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4(
@@ -94,7 +94,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8(
@@ -103,7 +103,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8(
@@ -112,7 +112,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4(
@@ -121,7 +121,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4(
@@ -130,7 +130,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2(
@@ -139,7 +139,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2(
@@ -148,7 +148,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1(
@@ -157,7 +157,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1(
@@ -166,7 +166,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2(
@@ -175,7 +175,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2(
@@ -184,7 +184,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4(
@@ -193,7 +193,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4(
@@ -202,7 +202,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8(
@@ -211,7 +211,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8(
@@ -220,7 +220,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4(
@@ -229,7 +229,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2(
@@ -238,7 +238,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1(
@@ -247,7 +247,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2(
@@ -256,7 +256,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4(
@@ -265,7 +265,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8(
@@ -274,7 +274,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4(
@@ -283,7 +283,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
@@ -346,7 +346,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
@@ -355,7 +355,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
@@ -364,7 +364,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
@@ -373,7 +373,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
@@ -382,7 +382,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
@@ -391,7 +391,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
@@ -400,7 +400,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
@@ -409,7 +409,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
@@ -418,7 +418,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
@@ -427,7 +427,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
@@ -436,7 +436,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
@@ -445,7 +445,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
@@ -454,7 +454,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
@@ -463,7 +463,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
@@ -472,7 +472,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
@@ -481,7 +481,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
@@ -490,7 +490,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
@@ -499,7 +499,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
@@ -508,7 +508,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
@@ -517,7 +517,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
@@ -526,7 +526,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
@@ -535,7 +535,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
@@ -544,7 +544,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
@@ -553,7 +553,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
@@ -562,7 +562,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
@@ -571,7 +571,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
@@ -580,7 +580,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
@@ -589,7 +589,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
@@ -598,7 +598,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
@@ -607,7 +607,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
@@ -616,7 +616,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
@@ -625,7 +625,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
@@ -634,7 +634,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
@@ -643,7 +643,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
@@ -652,7 +652,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
@@ -661,7 +661,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
- return vfcvt_x(src, vl);
+ return __riscv_vfcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
@@ -670,7 +670,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_x(src, vl);
+ return __riscv_vfcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
@@ -679,7 +679,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
@@ -688,7 +688,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
@@ -697,7 +697,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
@@ -706,7 +706,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
@@ -715,7 +715,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
@@ -724,7 +724,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
@@ -733,7 +733,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
- return vfcvt_xu(src, vl);
+ return __riscv_vfcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
@@ -742,7 +742,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_xu(src, vl);
+ return __riscv_vfcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
@@ -751,7 +751,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
@@ -769,7 +769,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
@@ -778,7 +778,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
@@ -787,7 +787,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
@@ -796,7 +796,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
@@ -805,7 +805,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
@@ -814,7 +814,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
- return vfcvt_f(src, vl);
+ return __riscv_vfcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_m(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m(
@@ -832,7 +832,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m(
@@ -841,7 +841,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m(
@@ -850,7 +850,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m(
@@ -859,7 +859,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m(
@@ -868,7 +868,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m(
@@ -877,7 +877,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m(
@@ -886,7 +886,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m(
@@ -895,7 +895,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m(
@@ -904,7 +904,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m(
@@ -913,7 +913,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m(
@@ -922,7 +922,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m(
@@ -931,7 +931,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m(
@@ -940,7 +940,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m(
@@ -967,7 +967,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m(
@@ -976,7 +976,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m(
@@ -985,7 +985,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m(
@@ -994,7 +994,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m(
@@ -1003,7 +1003,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m(
@@ -1012,7 +1012,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m(
@@ -1021,7 +1021,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m(
@@ -1030,7 +1030,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m(
@@ -1039,7 +1039,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vint16mf4_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_m(
@@ -1048,7 +1048,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vint16mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vint16mf2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_m(
@@ -1057,7 +1057,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vint16mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_m(
@@ -1066,7 +1066,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vint16m2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_m(
@@ -1075,7 +1075,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vint16m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_m(
@@ -1084,7 +1084,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_m(
@@ -1093,7 +1093,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_m(
@@ -1102,7 +1102,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint16mf4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_m(
@@ -1111,7 +1111,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint16mf2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_m(
@@ -1120,7 +1120,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint16m2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_m(
@@ -1129,7 +1129,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint16m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_m(
@@ -1138,7 +1138,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m(
@@ -1147,7 +1147,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
@@ -1156,7 +1156,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m(
@@ -1165,7 +1165,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
@@ -1174,7 +1174,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m(
@@ -1183,7 +1183,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
@@ -1192,7 +1192,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m(
@@ -1201,7 +1201,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
@@ -1210,7 +1210,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m(
@@ -1219,7 +1219,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
@@ -1228,7 +1228,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m(
@@ -1237,7 +1237,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
@@ -1246,7 +1246,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m(
@@ -1255,7 +1255,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
@@ -1264,7 +1264,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m(
@@ -1273,7 +1273,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
@@ -1282,7 +1282,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
@@ -1291,7 +1291,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
@@ -1300,7 +1300,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
@@ -1309,7 +1309,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
@@ -1318,7 +1318,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
@@ -1327,7 +1327,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m(
@@ -1336,7 +1336,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vint32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m(
@@ -1345,7 +1345,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m(
@@ -1354,7 +1354,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m(
@@ -1363,7 +1363,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
@@ -1372,7 +1372,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
@@ -1381,7 +1381,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint32mf2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
@@ -1390,7 +1390,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
@@ -1399,7 +1399,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
@@ -1408,7 +1408,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m(
@@ -1417,7 +1417,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
@@ -1426,7 +1426,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m(
@@ -1435,7 +1435,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
@@ -1444,7 +1444,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m(
@@ -1453,7 +1453,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
@@ -1462,7 +1462,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m(
@@ -1471,7 +1471,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfcvt_x(mask, src, vl);
+ return __riscv_vfcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
@@ -1480,7 +1480,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_x(mask, src, vl);
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
@@ -1489,7 +1489,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
@@ -1498,7 +1498,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
@@ -1507,7 +1507,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
@@ -1516,7 +1516,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
@@ -1525,7 +1525,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
@@ -1534,7 +1534,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
@@ -1543,7 +1543,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfcvt_xu(mask, src, vl);
+ return __riscv_vfcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
@@ -1552,7 +1552,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m(
@@ -1561,7 +1561,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m(
@@ -1570,7 +1570,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m(
@@ -1579,7 +1579,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m(
@@ -1588,7 +1588,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vint64m8_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
@@ -1597,7 +1597,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vint64m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
@@ -1606,7 +1606,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
@@ -1615,7 +1615,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
@@ -1624,6 +1624,6 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint64m8_t src, size_t vl) {
- return vfcvt_f(mask, src, vl);
+ return __riscv_vfcvt_f(mask, src, vl);
}
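
The hunks above apply one mechanical rename: every call to an overloaded RVV
intrinsic gains the `__riscv_` prefix, while argument lists, overload
resolution, and return types are untouched. As a minimal sketch of how user
code migrates (hypothetical caller, not part of this patch; the function name
and operands are illustrative, and the signature mirrors the i32m2 test above):

    #include <riscv_vector.h>

    // Truncating float->int conversion. Before this patch-set the call was
    //   vfcvt_rtz_x(v, vl);
    // afterwards only the callee name changes:
    vint32m2_t convert_rtz(vfloat32m2_t v, size_t vl) {
      // The overload still resolves on the operand type (vfloat32m2_t -> vint32m2_t).
      return __riscv_vfcvt_rtz_x(v, vl);
    }
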
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfdiv.c
index 7fe971d24f44..b3e43fe0d792 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfdiv.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfdiv(op1, op2, vl);
+ return __riscv_vfdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfdiv(mask, op1, op2, vl);
+ return __riscv_vfdiv(mask, op1, op2, vl);
}
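Note on the rename above: only the spelling at the call site changes; the overloaded argument lists and type-based resolution are untouched. The following is a minimal caller-side sketch of the new spelling, assuming a toolchain that already ships the prefixed overloaded intrinsics in riscv_vector.h and a build with the vector extension enabled (e.g. -march=rv64gcv); the function and array names here are illustrative and not part of this patch.

#include <riscv_vector.h>
#include <stddef.h>

/* Elementwise out[i] = a[i] / b[i], strip-mined with vsetvl. */
void vec_div_f32(float *out, const float *a, const float *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);           /* elements handled this pass */
    vfloat32m1_t va = __riscv_vle32_v_f32m1(a + i, vl);
    vfloat32m1_t vb = __riscv_vle32_v_f32m1(b + i, vl);
    vfloat32m1_t vq = __riscv_vfdiv(va, vb, vl);       /* prefixed overloaded name */
    __riscv_vse32_v_f32m1(out + i, vq, vl);
    i += vl;
  }
}

Migrating existing sources is a one-for-one rename: vfdiv(va, vb, vl) becomes __riscv_vfdiv(va, vb, vl), with no change to operands or vl handling.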
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfirst.c
index ca53cfa9c8f7..4595e4cb3a33 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfirst.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfirst.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b1(vbool1_t op1, size_t vl) {
- return vfirst(op1, vl);
+ return __riscv_vfirst(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b2(
@@ -21,7 +21,7 @@ long test_vfirst_m_b1(vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b2(vbool2_t op1, size_t vl) {
- return vfirst(op1, vl);
+ return __riscv_vfirst(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b4(
@@ -30,7 +30,7 @@ long test_vfirst_m_b2(vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b4(vbool4_t op1, size_t vl) {
- return vfirst(op1, vl);
+ return __riscv_vfirst(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b8(
@@ -39,7 +39,7 @@ long test_vfirst_m_b4(vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b8(vbool8_t op1, size_t vl) {
- return vfirst(op1, vl);
+ return __riscv_vfirst(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b16(
@@ -48,7 +48,7 @@ long test_vfirst_m_b8(vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b16(vbool16_t op1, size_t vl) {
- return vfirst(op1, vl);
+ return __riscv_vfirst(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b32(
@@ -57,7 +57,7 @@ long test_vfirst_m_b16(vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b32(vbool32_t op1, size_t vl) {
- return vfirst(op1, vl);
+ return __riscv_vfirst(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b64(
@@ -66,7 +66,7 @@ long test_vfirst_m_b32(vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b64(vbool64_t op1, size_t vl) {
- return vfirst(op1, vl);
+ return __riscv_vfirst(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b1_m(
@@ -75,7 +75,7 @@ long test_vfirst_m_b64(vbool64_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
- return vfirst(mask, op1, vl);
+ return __riscv_vfirst(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b2_m(
@@ -84,7 +84,7 @@ long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
- return vfirst(mask, op1, vl);
+ return __riscv_vfirst(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b4_m(
@@ -93,7 +93,7 @@ long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
- return vfirst(mask, op1, vl);
+ return __riscv_vfirst(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b8_m(
@@ -102,7 +102,7 @@ long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
- return vfirst(mask, op1, vl);
+ return __riscv_vfirst(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b16_m(
@@ -111,7 +111,7 @@ long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
- return vfirst(mask, op1, vl);
+ return __riscv_vfirst(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b32_m(
@@ -120,7 +120,7 @@ long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
- return vfirst(mask, op1, vl);
+ return __riscv_vfirst(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfirst_m_b64_m(
@@ -129,6 +129,6 @@ long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
- return vfirst(mask, op1, vl);
+ return __riscv_vfirst(mask, op1, vl);
}
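The vfirst tests above follow the same pattern; __riscv_vfirst returns the index of the first set mask bit within vl, or -1 when none is set. A hedged usage sketch under the same toolchain assumptions as before (names are hypothetical):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Index of the first element equal to key, or -1 if absent. */
long find_first_i32(const int32_t *data, size_t n, int32_t key) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vint32m1_t v = __riscv_vle32_v_i32m1(data + i, vl);
    vbool32_t eq = __riscv_vmseq(v, key, vl);  /* overloaded vector-scalar compare */
    long pos = __riscv_vfirst(eq, vl);         /* -1 if no mask bit is set */
    if (pos >= 0)
      return (long)i + pos;
    i += vl;
  }
  return -1;
}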
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c
index 411f76727c2a..664f6eb94a1e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc(vd, vs1, vs2, vl);
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc(vd, rs1, vs2, vl);
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
}
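As with the other files, only the intrinsic spelling changes for vfmacc; the operand order (vd, vs1/rs1, vs2, vl) and the fused multiply-accumulate semantics vd = vs1 * vs2 + vd are unchanged. A small saxpy-style sketch using the scalar (vf) overload, under the same illustrative-name and rv64gcv assumptions:

#include <riscv_vector.h>
#include <stddef.h>

/* y[i] += a * x[i], using the fused multiply-accumulate form. */
void saxpy_f32(float *y, const float *x, float a, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vfloat32m1_t vx = __riscv_vle32_v_f32m1(x + i, vl);
    vfloat32m1_t vy = __riscv_vle32_v_f32m1(y + i, vl);
    vy = __riscv_vfmacc(vy, a, vx, vl);  /* vy = a * vx + vy */
    __riscv_vse32_v_f32m1(y + i, vy, vl);
    i += vl;
  }
}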
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c
index 3db5644b7a13..efe7362feee6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd(vd, vs1, vs2, vl);
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd(vd, rs1, vs2, vl);
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
}
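
For context, a minimal usage sketch of the renamed overloaded vfmadd intrinsic (illustrative user code, not part of this patch; function and variable names are assumptions, and it presumes <riscv_vector.h> plus the V extension). vfmadd computes vd = vd * vs1 + vs2, so the call below yields a[i] * b[i] + c[i]:

    #include <riscv_vector.h>

    /* Sketch: out[i] = a[i] * b[i] + c[i] with a vsetvl strip-mining loop. */
    void fma_f32(float *out, const float *a, const float *b, const float *c,
                 size_t n) {
      for (size_t i = 0; i < n;) {
        size_t vl = __riscv_vsetvl_e32m1(n - i);            /* lanes this trip */
        vfloat32m1_t va = __riscv_vle32_v_f32m1(a + i, vl); /* loads keep the  */
        vfloat32m1_t vb = __riscv_vle32_v_f32m1(b + i, vl); /* full name: the  */
        vfloat32m1_t vc = __riscv_vle32_v_f32m1(c + i, vl); /* return type is  */
                                                            /* not inferable   */
        vfloat32m1_t vd = __riscv_vfmadd(va, vb, vc, vl);   /* va * vb + vc    */
        __riscv_vse32(out + i, vd, vl);                     /* overloaded store */
        i += vl;
      }
    }
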
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax.c
index 9b34311bb892..f93ab368329f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfmax(op1, op2, vl);
+ return __riscv_vfmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmax(mask, op1, op2, vl);
+ return __riscv_vfmax(mask, op1, op2, vl);
}
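
The _m tests above lean on overload resolution: the same __riscv_vfmax name covers both the plain and masked forms, and a leading vbool argument selects the masked intrinsic. A small sketch mirroring the two call shapes exercised above (illustrative code, not from this patch):

    #include <riscv_vector.h>

    vfloat32m1_t max_demo(vbool32_t m, vfloat32m1_t x, vfloat32m1_t y,
                          size_t vl) {
      vfloat32m1_t t = __riscv_vfmax(x, y, vl); /* unmasked overload         */
      return __riscv_vfmax(m, t, y, vl);        /* leading vbool32_t selects */
                                                /* the masked overload       */
    }
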
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c
index 135cfeb0a36b..51c212ed4c1c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, _Float16 op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, _Float16 op2, vbool32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, _Float16 op2, vbool16_t ma
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, _Float16 op2, vbool8_t mas
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, _Float16 op2, vbool4_t mas
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, _Float16 op2, vbool2_t mas
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float op2, vbool64_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float op2, vbool32_t mask,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float op2, vbool16_t mask,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float op2, vbool8_t mask,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float op2, vbool4_t mask,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, double op2, vbool64_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t op1, double op2, vbool32_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
@@ -139,6 +139,6 @@ vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, double op2, vbool16_t mask
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
- return vfmerge(op1, op2, mask, vl);
+ return __riscv_vfmerge(op1, op2, mask, vl);
}
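
As the tests above show, vfmerge takes the vector operand first, the scalar second, and the mask last; masked lanes receive the scalar, unmasked lanes keep the vector. A sketch pairing it with an overloaded compare (illustrative helper under assumed names, not from this patch):

    #include <riscv_vector.h>

    /* Sketch: replace every negative lane of v with 0.0f. */
    vfloat32m1_t zero_negatives(vfloat32m1_t v, size_t vl) {
      vbool32_t neg = __riscv_vmflt(v, 0.0f, vl); /* mask of lanes with v < 0 */
      return __riscv_vfmerge(v, 0.0f, neg, vl);   /* masked lanes take 0.0f   */
    }
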
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin.c
index a9af52783475..b43027bc15fe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfmin(op1, op2, vl);
+ return __riscv_vfmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmin(mask, op1, op2, vl);
+ return __riscv_vfmin(mask, op1, op2, vl);
}
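
Note: a minimal usage sketch (not taken from this patch) of how caller code migrates to the prefixed overloaded intrinsic exercised above. It assumes a toolchain whose <riscv_vector.h> implements the __riscv_ naming scheme with the V extension enabled; the helper name elementwise_min is hypothetical. Overload resolution is unchanged; only the name gains the prefix.

#include <riscv_vector.h>

// Elementwise minimum of two f32m1 vectors for the first vl lanes; the
// f32m1 overload is selected from the argument types, exactly as before.
vfloat32m1_t elementwise_min(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return __riscv_vfmin(a, b, vl);  // was: vfmin(a, b, vl)
}
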
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c
index 76eb18ea206c..4a3330a8c991 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac(vd, vs1, vs2, vl);
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac(vd, rs1, vs2, vl);
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
}
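
For reference, a hedged sketch (not part of the patch) of the masked overloaded form tested above; vfmsac computes vd = vs1 * vs2 - vd on active lanes, and the wrapper name elementwise_msac is hypothetical.

#include <riscv_vector.h>

// Hypothetical wrapper: vd[i] = vs1[i] * vs2[i] - vd[i] where mask[i] is set
// and i < vl; lanes with a clear mask bit follow the intrinsic's default
// (non-policy) behavior.
vfloat64m1_t elementwise_msac(vbool64_t mask, vfloat64m1_t vd,
                              vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
  return __riscv_vfmsac(mask, vd, vs1, vs2, vl);  // was: vfmsac(mask, vd, ...)
}
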
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c
index 06c70b406b18..9fcebddf62ac 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub(vd, vs1, vs2, vl);
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub(vd, rs1, vs2, vl);
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
}
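
Similarly, a hedged sketch (not from the patch) of the vector-scalar form covered above; vfmsub overwrites the multiplicand, computing vd = vd * rs1 - vs2, and the name scale_sub is hypothetical.

#include <riscv_vector.h>

// Hypothetical wrapper: vd[i] = vd[i] * rs1 - vs2[i] for i < vl.
vfloat32m1_t scale_sub(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
  return __riscv_vfmsub(vd, rs1, vs2, vl);  // was: vfmsub(vd, rs1, vs2, vl)
}
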
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul.c
index 8246aa212af1..7224559ff86b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfmul(op1, op2, vl);
+ return __riscv_vfmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmul(mask, op1, op2, vl);
+ return __riscv_vfmul(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c
index a86aa2f44cc2..6684b0f0db93 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret half [[TMP0]]
//
_Float16 test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16(
@@ -22,7 +22,7 @@ _Float16 test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t src) {
// CHECK-RV64-NEXT: ret half [[TMP0]]
//
_Float16 test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16(
@@ -31,7 +31,7 @@ _Float16 test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t src) {
// CHECK-RV64-NEXT: ret half [[TMP0]]
//
_Float16 test_vfmv_f_s_f16m1_f16(vfloat16m1_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16(
@@ -40,7 +40,7 @@ _Float16 test_vfmv_f_s_f16m1_f16(vfloat16m1_t src) {
// CHECK-RV64-NEXT: ret half [[TMP0]]
//
_Float16 test_vfmv_f_s_f16m2_f16(vfloat16m2_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16(
@@ -49,7 +49,7 @@ _Float16 test_vfmv_f_s_f16m2_f16(vfloat16m2_t src) {
// CHECK-RV64-NEXT: ret half [[TMP0]]
//
_Float16 test_vfmv_f_s_f16m4_f16(vfloat16m4_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16(
@@ -58,7 +58,7 @@ _Float16 test_vfmv_f_s_f16m4_f16(vfloat16m4_t src) {
// CHECK-RV64-NEXT: ret half [[TMP0]]
//
_Float16 test_vfmv_f_s_f16m8_f16(vfloat16m8_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32(
@@ -67,7 +67,7 @@ _Float16 test_vfmv_f_s_f16m8_f16(vfloat16m8_t src) {
// CHECK-RV64-NEXT: ret float [[TMP0]]
//
float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32(
@@ -76,7 +76,7 @@ float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) {
// CHECK-RV64-NEXT: ret float [[TMP0]]
//
float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32(
@@ -85,7 +85,7 @@ float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) {
// CHECK-RV64-NEXT: ret float [[TMP0]]
//
float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32(
@@ -94,7 +94,7 @@ float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) {
// CHECK-RV64-NEXT: ret float [[TMP0]]
//
float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m8_f32(
@@ -103,7 +103,7 @@ float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) {
// CHECK-RV64-NEXT: ret float [[TMP0]]
//
float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64(
@@ -112,7 +112,7 @@ float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) {
// CHECK-RV64-NEXT: ret double [[TMP0]]
//
double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64(
@@ -121,7 +121,7 @@ double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) {
// CHECK-RV64-NEXT: ret double [[TMP0]]
//
double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64(
@@ -130,7 +130,7 @@ double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) {
// CHECK-RV64-NEXT: ret double [[TMP0]]
//
double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64(
@@ -139,6 +139,6 @@ double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) {
// CHECK-RV64-NEXT: ret double [[TMP0]]
//
double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) {
- return vfmv_f(src);
+ return __riscv_vfmv_f(src);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c
index fb31f477ba0a..d97579dabf36 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8(
@@ -22,7 +22,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4(
@@ -31,7 +31,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4(
@@ -40,7 +40,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2(
@@ -49,7 +49,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2(
@@ -58,7 +58,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1(
@@ -67,7 +67,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1(
@@ -76,7 +76,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2(
@@ -85,7 +85,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2(
@@ -94,7 +94,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4(
@@ -103,7 +103,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4(
@@ -112,7 +112,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8(
@@ -121,7 +121,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8(
@@ -130,7 +130,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4(
@@ -139,7 +139,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4(
@@ -148,7 +148,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2(
@@ -157,7 +157,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2(
@@ -166,7 +166,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1(
@@ -175,7 +175,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1(
@@ -184,7 +184,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2(
@@ -193,7 +193,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2(
@@ -202,7 +202,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4(
@@ -211,7 +211,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4(
@@ -220,7 +220,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
@@ -229,7 +229,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
@@ -238,7 +238,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
@@ -247,7 +247,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
@@ -256,7 +256,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
@@ -265,7 +265,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
@@ -274,7 +274,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
@@ -283,7 +283,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
@@ -292,7 +292,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
@@ -301,7 +301,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
@@ -310,7 +310,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
@@ -319,7 +319,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
@@ -328,7 +328,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
@@ -337,7 +337,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
@@ -346,7 +346,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
@@ -355,7 +355,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
@@ -364,7 +364,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
@@ -373,7 +373,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
@@ -382,7 +382,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
@@ -391,7 +391,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
@@ -400,7 +400,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4(
@@ -409,7 +409,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2(
@@ -418,7 +418,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1(
@@ -427,7 +427,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2(
@@ -436,7 +436,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4(
@@ -445,7 +445,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4(
@@ -454,7 +454,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2(
@@ -463,7 +463,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1(
@@ -472,7 +472,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2(
@@ -481,7 +481,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4(
@@ -490,7 +490,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4(
@@ -499,7 +499,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4(
@@ -508,7 +508,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
- return vfncvt_rod_f(src, vl);
+ return __riscv_vfncvt_rod_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2(
@@ -517,7 +517,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2(
@@ -526,7 +526,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
- return vfncvt_rod_f(src, vl);
+ return __riscv_vfncvt_rod_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1(
@@ -535,7 +535,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1(
@@ -544,7 +544,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
- return vfncvt_rod_f(src, vl);
+ return __riscv_vfncvt_rod_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2(
@@ -553,7 +553,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2(
@@ -562,7 +562,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
- return vfncvt_rod_f(src, vl);
+ return __riscv_vfncvt_rod_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4(
@@ -571,7 +571,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4(
@@ -580,7 +580,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
- return vfncvt_rod_f(src, vl);
+ return __riscv_vfncvt_rod_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
@@ -598,7 +598,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
@@ -607,7 +607,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
@@ -616,7 +616,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
@@ -625,7 +625,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
@@ -634,7 +634,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
@@ -643,7 +643,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
- return vfncvt_x(src, vl);
+ return __riscv_vfncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
@@ -652,7 +652,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_x(src, vl);
+ return __riscv_vfncvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
@@ -661,7 +661,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
@@ -670,7 +670,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
@@ -679,7 +679,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
@@ -688,7 +688,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
@@ -697,7 +697,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
@@ -706,7 +706,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
@@ -715,7 +715,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
- return vfncvt_xu(src, vl);
+ return __riscv_vfncvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
@@ -724,7 +724,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_xu(src, vl);
+ return __riscv_vfncvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
@@ -733,7 +733,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
@@ -742,7 +742,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
@@ -751,7 +751,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
@@ -760,7 +760,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
@@ -769,7 +769,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
@@ -778,7 +778,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
@@ -787,7 +787,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
@@ -796,7 +796,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
@@ -805,7 +805,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
@@ -814,7 +814,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
- return vfncvt_rod_f(src, vl);
+ return __riscv_vfncvt_rod_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
@@ -823,7 +823,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
@@ -832,7 +832,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
- return vfncvt_rod_f(src, vl);
+ return __riscv_vfncvt_rod_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
@@ -841,7 +841,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
@@ -850,7 +850,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
- return vfncvt_rod_f(src, vl);
+ return __riscv_vfncvt_rod_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
@@ -859,7 +859,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
- return vfncvt_f(src, vl);
+ return __riscv_vfncvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
@@ -868,7 +868,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
- return vfncvt_rod_f(src, vl);
+ return __riscv_vfncvt_rod_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m(
@@ -877,7 +877,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m(
@@ -886,7 +886,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m(
@@ -895,7 +895,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m(
@@ -904,7 +904,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m(
@@ -913,7 +913,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m(
@@ -922,7 +922,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m(
@@ -931,7 +931,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m(
@@ -940,7 +940,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m(
@@ -949,7 +949,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m(
@@ -958,7 +958,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m(
@@ -967,7 +967,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m(
@@ -976,7 +976,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m(
@@ -985,7 +985,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m(
@@ -994,7 +994,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m(
@@ -1003,7 +1003,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m(
@@ -1012,7 +1012,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m(
@@ -1021,7 +1021,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m(
@@ -1030,7 +1030,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m(
@@ -1039,7 +1039,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m(
@@ -1048,7 +1048,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m(
@@ -1057,7 +1057,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m(
@@ -1066,7 +1066,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m(
@@ -1075,7 +1075,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m(
@@ -1084,7 +1084,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
@@ -1093,7 +1093,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
@@ -1102,7 +1102,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
@@ -1111,7 +1111,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
@@ -1120,7 +1120,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
@@ -1129,7 +1129,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
@@ -1138,7 +1138,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
@@ -1147,7 +1147,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
@@ -1156,7 +1156,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
@@ -1165,7 +1165,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
@@ -1174,7 +1174,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
@@ -1183,7 +1183,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
@@ -1192,7 +1192,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
@@ -1201,7 +1201,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
@@ -1210,7 +1210,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
@@ -1219,7 +1219,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
@@ -1228,7 +1228,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
@@ -1237,7 +1237,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
@@ -1246,7 +1246,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
@@ -1255,7 +1255,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
@@ -1264,7 +1264,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m(
@@ -1273,7 +1273,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_m(
@@ -1282,7 +1282,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vint32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_m(
@@ -1291,7 +1291,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vint32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_m(
@@ -1300,7 +1300,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_m(
@@ -1309,7 +1309,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_m(
@@ -1318,7 +1318,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_m(
@@ -1327,7 +1327,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vuint32mf2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_m(
@@ -1336,7 +1336,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vuint32m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_m(
@@ -1345,7 +1345,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_m(
@@ -1354,7 +1354,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_m(
@@ -1363,7 +1363,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m(
@@ -1372,7 +1372,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rod_f(mask, src, vl);
+ return __riscv_vfncvt_rod_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m(
@@ -1381,7 +1381,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m(
@@ -1390,7 +1390,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfncvt_rod_f(mask, src, vl);
+ return __riscv_vfncvt_rod_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m(
@@ -1399,7 +1399,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m(
@@ -1408,7 +1408,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfncvt_rod_f(mask, src, vl);
+ return __riscv_vfncvt_rod_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m(
@@ -1417,7 +1417,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m(
@@ -1426,7 +1426,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfncvt_rod_f(mask, src, vl);
+ return __riscv_vfncvt_rod_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m(
@@ -1435,7 +1435,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m(
@@ -1444,7 +1444,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return vfncvt_rod_f(mask, src, vl);
+ return __riscv_vfncvt_rod_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
@@ -1453,7 +1453,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
@@ -1462,7 +1462,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
@@ -1471,7 +1471,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
@@ -1480,7 +1480,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
@@ -1489,7 +1489,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
@@ -1498,7 +1498,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
@@ -1507,7 +1507,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfncvt_x(mask, src, vl);
+ return __riscv_vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
@@ -1516,7 +1516,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_x(mask, src, vl);
+ return __riscv_vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
@@ -1525,7 +1525,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
@@ -1534,7 +1534,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
@@ -1543,7 +1543,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
@@ -1552,7 +1552,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
@@ -1561,7 +1561,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
@@ -1570,7 +1570,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
@@ -1579,7 +1579,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfncvt_xu(mask, src, vl);
+ return __riscv_vfncvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
@@ -1588,7 +1588,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_xu(mask, src, vl);
+ return __riscv_vfncvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
@@ -1597,7 +1597,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m(
@@ -1606,7 +1606,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vint64m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m(
@@ -1615,7 +1615,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m(
@@ -1624,7 +1624,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
@@ -1633,7 +1633,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
@@ -1642,7 +1642,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vuint64m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
@@ -1651,7 +1651,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
@@ -1660,7 +1660,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
@@ -1669,7 +1669,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
@@ -1678,7 +1678,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return vfncvt_rod_f(mask, src, vl);
+ return __riscv_vfncvt_rod_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m(
@@ -1687,7 +1687,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
@@ -1696,7 +1696,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return vfncvt_rod_f(mask, src, vl);
+ return __riscv_vfncvt_rod_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m(
@@ -1705,7 +1705,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
@@ -1714,7 +1714,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return vfncvt_rod_f(mask, src, vl);
+ return __riscv_vfncvt_rod_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m(
@@ -1723,7 +1723,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfncvt_f(mask, src, vl);
+ return __riscv_vfncvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
@@ -1732,6 +1732,6 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return vfncvt_rod_f(mask, src, vl);
+ return __riscv_vfncvt_rod_f(mask, src, vl);
}
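For readers skimming these autogenerated hunks: the pattern is uniform — every overloaded call gains the __riscv_ prefix while arguments and semantics stay identical. A minimal user-side sketch (the helper name and -march flag are illustrative assumptions, not part of this patch; the intrinsic call itself matches the masked tests above):

    #include <riscv_vector.h>

    // Masked narrowing convert, f64 -> i32; build with e.g. -march=rv64gcv.
    // Hypothetical helper -- only the __riscv_vfncvt_x call is from this patch.
    vint32m1_t narrow_to_i32(vbool32_t mask, vfloat64m2_t src, size_t vl) {
      return __riscv_vfncvt_x(mask, src, vl);  // formerly: vfncvt_x(mask, src, vl)
    }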
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfneg.c
index 5b2efe69041d..661138b837df 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfneg.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfneg_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfneg_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfneg_v_f16m1(vfloat16m1_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfneg_v_f16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfneg_v_f16m2(vfloat16m2_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfneg_v_f16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfneg_v_f16m4(vfloat16m4_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfneg_v_f16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfneg_v_f16m8(vfloat16m8_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfneg_v_f16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1(vfloat32m1_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfneg_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2(vfloat32m2_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfneg_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4(vfloat32m4_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfneg_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8(vfloat32m8_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfneg_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1(vfloat64m1_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfneg_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2(vfloat64m2_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfneg_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4(vfloat64m4_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfneg_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t op1, size_t vl) {
- return vfneg(op1, vl);
+ return __riscv_vfneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_m(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_m(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_m(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_m(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
- return vfneg(mask, op1, vl);
+ return __riscv_vfneg(mask, op1, vl);
}
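The vfneg.c hunks above follow the same mechanical rename for the unmasked unary form; a sketch under the same assumptions (hypothetical helper name, call signature taken from the tests in this file):

    #include <riscv_vector.h>

    // Elementwise floating-point negate over the first vl lanes.
    vfloat32m1_t negate(vfloat32m1_t op1, size_t vl) {
      return __riscv_vfneg(op1, vl);  // formerly: vfneg(op1, vl)
    }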
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c
index 68de7fb43780..de34a0702fab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
}
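// Illustrative usage sketch (not part of the autogenerated tests above): with
// the rename, user code spells the overloaded intrinsic with the __riscv_
// prefix while overload resolution still comes from the argument types. Per
// the V spec, vfnmacc computes vd[i] = -(vs1[i] * vs2[i]) - vd[i]. The helper
// name below is hypothetical.
#include <riscv_vector.h>

vfloat32m1_t negated_fma_f32m1(vfloat32m1_t acc, vfloat32m1_t a,
                               vfloat32m1_t b, size_t vl) {
  // Resolves to the vv form of vfnmacc for f32m1 from the operand types.
  return __riscv_vfnmacc(acc, a, b, vl);
}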
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c
index 0326845d58b6..422e8294022d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
}
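// Illustrative usage sketch, not from the generated tests: the masked
// overload keeps the same shape, with the mask as the leading argument, and
// picks the .vf form when the multiplicand is a scalar. Per the V spec,
// vfnmadd computes vd[i] = -(vd[i] * vs1[i]) - vs2[i]. The helper name is
// hypothetical.
#include <riscv_vector.h>

vfloat64m1_t masked_nfmadd_f64m1(vbool64_t m, vfloat64m1_t vd, double rs1,
                                 vfloat64m1_t vs2, size_t vl) {
  // Scalar rs1 selects the vf overload; m gates which elements are updated.
  return __riscv_vfnmadd(m, vd, rs1, vs2, vl);
}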
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c
index 405e9b03b797..dc343eb013b5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
}
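// Illustrative usage sketch, not from the generated tests: the only source
// change the rename requires is the prefix; argument order and overload
// resolution are unchanged. vfnmsac computes vd[i] = -(vs1[i] * vs2[i]) +
// vd[i]. The helper name is hypothetical.
#include <riscv_vector.h>

vfloat16m1_t nfmsac_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2,
                          size_t vl) {
  // Before this commit: return vfnmsac(vd, rs1, vs2, vl);
  return __riscv_vfnmsac(vd, rs1, vs2, vl);
}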
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c
index 5106bbc9e55f..55120a3a8632 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
}
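
As the vfnmsub.c hunks above show, the rename does not disturb overload resolution: the one prefixed name still covers both the .vv (vector multiplicand) and .vf (scalar multiplicand) forms, selected by argument type. A minimal sketch, with signatures copied from the tests and hypothetical helper names:

#include <riscv_vector.h>

// .vv overload: vd[i] = -(vd[i] * vs1[i]) + vs2[i]
vfloat64m1_t fnmsub_vv(vfloat64m1_t vd, vfloat64m1_t vs1,
                       vfloat64m1_t vs2, size_t vl) {
  return __riscv_vfnmsub(vd, vs1, vs2, vl);
}

// .vf overload: same name, scalar multiplicand selects the other form.
vfloat64m1_t fnmsub_vf(vfloat64m1_t vd, double rs1,
                       vfloat64m1_t vs2, size_t vl) {
  return __riscv_vfnmsub(vd, rs1, vs2, vl);
}
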
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrdiv.c
index d3c5db98724d..64e2b57b7610 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrdiv.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfrdiv(op1, op2, vl);
+ return __riscv_vfrdiv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_m(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_m(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_m(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_m(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float o
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrdiv(mask, op1, op2, vl);
+ return __riscv_vfrdiv(mask, op1, op2, vl);
}
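
vfrdiv only exists in a .vf form, which is why every hunk in vfrdiv.c has the same shape. A minimal sketch of the renamed call (reverse division, i.e. the scalar is the dividend, out[i] = op2 / op1[i]; helper name hypothetical, signature from the tests):

#include <riscv_vector.h>

// Hypothetical helper: out[i] = op2 / op1[i] for i < vl.
vfloat32m1_t rdiv(vfloat32m1_t op1, float op2, size_t vl) {
  return __riscv_vfrdiv(op1, op2, vl);
}
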
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c
index 60c95c124b2e..d2fc8b575e50 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
- return vfrec7(op1, vl);
+ return __riscv_vfrec7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_m(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_m(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_m(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_m(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
- return vfrec7(mask, op1, vl);
+ return __riscv_vfrec7(mask, op1, vl);
}
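
The unary intrinsics follow the same pattern with one fewer operand, as the vfrec7.c hunks above show. A minimal sketch (vfrec7 is the ~7-bit reciprocal estimate, out[i] ≈ 1.0 / op1[i]; helper name hypothetical):

#include <riscv_vector.h>

// Hypothetical helper: low-precision reciprocal estimate of each element.
vfloat64m1_t recip_est(vfloat64m1_t op1, size_t vl) {
  return __riscv_vfrec7(op1, vl);
}
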
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c
index 200524af4a21..2728cf2fc916 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1(
@@ -22,7 +22,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1(
@@ -49,7 +49,7 @@ vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
@@ -94,7 +94,7 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
@@ -103,7 +103,7 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
@@ -112,7 +112,7 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
@@ -139,7 +139,7 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax(vector, scalar, vl);
+ return __riscv_vfredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m(
@@ -148,7 +148,7 @@ vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m(
@@ -157,7 +157,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m(
@@ -184,7 +184,7 @@ vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m(
@@ -193,7 +193,7 @@ vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
@@ -238,7 +238,7 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
@@ -265,7 +265,7 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
@@ -274,6 +274,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax(mask, vector, scalar, vl);
+ return __riscv_vfredmax(mask, vector, scalar, vl);
}
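For reference, a minimal caller-side sketch of the renamed overloaded reduction. This assumes a <riscv_vector.h> that already exposes the __riscv_ prefix from this patch-set; f32_max_reduce and its parameters are illustrative names, not part of the patch:

#include <riscv_vector.h>

// Strip-mined max-reduction over a float buffer. The overloaded call
// resolves to the f32m8 -> f32m1 variant exercised by the tests above;
// all non-intrinsic names here are hypothetical.
float f32_max_reduce(const float *src, size_t n, float init) {
  vfloat32m1_t acc = __riscv_vfmv_s_f_f32m1(init, 1);
  for (size_t done = 0; done < n;) {
    size_t vl = __riscv_vsetvl_e32m8(n - done);
    vfloat32m8_t v = __riscv_vle32_v_f32m8(src + done, vl);
    acc = __riscv_vfredmax(v, acc, vl);  // was: vfredmax(v, acc, vl)
    done += vl;
  }
  return __riscv_vfmv_f_s_f32m1_f32(acc);
}
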
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c
index c6ba7d952c27..a8985894b1af 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1(
@@ -22,7 +22,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1(
@@ -49,7 +49,7 @@ vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
@@ -94,7 +94,7 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
@@ -103,7 +103,7 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
@@ -112,7 +112,7 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
@@ -139,7 +139,7 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin(vector, scalar, vl);
+ return __riscv_vfredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m(
@@ -148,7 +148,7 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m(
@@ -157,7 +157,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m(
@@ -184,7 +184,7 @@ vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m(
@@ -193,7 +193,7 @@ vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
@@ -238,7 +238,7 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
@@ -265,7 +265,7 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
@@ -274,6 +274,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin(mask, vector, scalar, vl);
+ return __riscv_vfredmin(mask, vector, scalar, vl);
}
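The masked overloads likewise only gain the prefix. A hedged sketch of a masked min-reduction (the comparison used to build the mask is an assumption, not taken from this patch):

// Reduce only the lanes of v that are below limit; id supplies the
// initial value in element 0. Non-intrinsic names are hypothetical.
vfloat64m1_t masked_min(vfloat64m2_t v, vfloat64m1_t id, double limit, size_t vl) {
  vbool32_t m = __riscv_vmflt_vf_f64m2_b32(v, limit, vl);
  return __riscv_vfredmin(m, v, id, vl);  // was: vfredmin(m, v, id, vl)
}
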
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c
index 78056ff02dd4..23969bfe26df 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1(
@@ -22,7 +22,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1(
@@ -49,7 +49,7 @@ vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
@@ -94,7 +94,7 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
@@ -103,7 +103,7 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
@@ -112,7 +112,7 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
@@ -139,7 +139,7 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum(vector, scalar, vl);
+ return __riscv_vfredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m(
@@ -148,7 +148,7 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m(
@@ -157,7 +157,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m(
@@ -184,7 +184,7 @@ vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m(
@@ -193,7 +193,7 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vect
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
@@ -238,7 +238,7 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
@@ -265,7 +265,7 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
@@ -274,6 +274,6 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum(mask, vector, scalar, vl);
+ return __riscv_vfredosum(mask, vector, scalar, vl);
}
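vfredosum is the ordered float sum, so the rename does not change its sequential-accumulation semantics; a one-line sketch under the same assumptions as above:

// Ordered sum: results match strictly sequential accumulation into
// element 0 of zero, at the cost of a serialized reduction.
vfloat32m1_t ordered_sum(vfloat32m4_t v, vfloat32m1_t zero, size_t vl) {
  return __riscv_vfredosum(v, zero, vl);  // was: vfredosum(v, zero, vl)
}
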
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c
index 5ec99783681d..35697fad7176 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1(
@@ -22,7 +22,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1(
@@ -49,7 +49,7 @@ vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1(
@@ -94,7 +94,7 @@ vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1(
@@ -103,7 +103,7 @@ vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1(
@@ -112,7 +112,7 @@ vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1(
@@ -139,7 +139,7 @@ vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum(vector, scalar, vl);
+ return __riscv_vfredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m(
@@ -148,7 +148,7 @@ vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t sca
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m(
@@ -157,7 +157,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m(
@@ -184,7 +184,7 @@ vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m(
@@ -193,7 +193,7 @@ vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vect
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m(
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m(
@@ -238,7 +238,7 @@ vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m(
@@ -265,7 +265,7 @@ vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m(
@@ -274,6 +274,6 @@ vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum(mask, vector, scalar, vl);
+ return __riscv_vfredusum(mask, vector, scalar, vl);
}
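vfredusum is the unordered counterpart; same shape, different reproducibility guarantees:

// Unordered sum: the reduction order is unspecified, so results may
// differ from sequential accumulation, but it is typically faster.
vfloat32m1_t unordered_sum(vfloat32m4_t v, vfloat32m1_t zero, size_t vl) {
  return __riscv_vfredusum(v, zero, vl);  // was: vfredusum(v, zero, vl)
}
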
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7.c
index 90ca60cdfecd..9ec788dc59e4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
- return vfrsqrt7(op1, vl);
+ return __riscv_vfrsqrt7(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_m(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_m(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_m(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_m(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
- return vfrsqrt7(mask, op1, vl);
+ return __riscv_vfrsqrt7(mask, op1, vl);
}
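vfrsqrt7 is an elementwise estimate instruction rather than a reduction; a sketch under the same naming assumptions:

// ~7-bit-accurate 1/sqrt(x) estimate per lane; callers normally refine
// it with Newton-Raphson iterations (not shown).
vfloat32m1_t rsqrt_estimate(vfloat32m1_t x, size_t vl) {
  return __riscv_vfrsqrt7(x, vl);  // was: vfrsqrt7(x, vl)
}
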
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub.c
index 82453fa18077..e673a73728cc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfrsub(op1, op2, vl);
+ return __riscv_vfrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_m(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_m(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_m(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_m(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float o
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrsub(mask, op1, op2, vl);
+ return __riscv_vfrsub(mask, op1, op2, vl);
}
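(For orientation, a minimal caller sketch of the renamed overload exercised by the vfrsub tests above. The wrapper name is hypothetical; the intrinsic signature is taken verbatim from test_vfrsub_vf_f32m1, and the snippet assumes a Clang build targeting RV64 with the V extension and zvfh as appropriate.)

#include <riscv_vector.h>

// Reverse subtract: each result lane is (x - v[i]), scalar on the left.
// The overloaded name now carries the __riscv_ prefix per riscv-c-api-doc.
vfloat32m1_t scalar_minus_vec(vfloat32m1_t v, float x, size_t vl) {
  return __riscv_vfrsub(v, x, vl);
}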
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj.c
index e61127421c60..6dbbb406df65 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnj(op1, op2, vl);
+ return __riscv_vfsgnj(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float o
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnj(mask, op1, op2, vl);
+ return __riscv_vfsgnj(mask, op1, op2, vl);
}
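(Likewise for the sign-injection overload renamed above: vfsgnj takes op1's magnitude with op2's sign, a vector copysign. The helper name below is illustrative; the signature matches test_vfsgnj_vv_f64m1.)

#include <riscv_vector.h>

// copysign-style operation: result lanes are |a[i]| with the sign of b[i].
vfloat64m1_t vec_copysign(vfloat64m1_t a, vfloat64m1_t b, size_t vl) {
  return __riscv_vfsgnj(a, b, vl);
}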
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn.c
index c93afe84467b..3a7e940f74c4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjn(op1, op2, vl);
+ return __riscv_vfsgnjn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjn(mask, op1, op2, vl);
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
}
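(One more hedged sketch for the negated sign-injection renamed above: vfsgnjn injects the flipped sign of op2, so passing the same operand twice yields elementwise negation, mirroring the ISA's vfneg.v pseudoinstruction. Helper name is illustrative; the signature matches test_vfsgnjn_vv_f32m1.)

#include <riscv_vector.h>

// Negation via sign-injection: |v[i]| with the negated sign of v[i].
vfloat32m1_t vec_negate(vfloat32m1_t v, size_t vl) {
  return __riscv_vfsgnjn(v, v, vl);
}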
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx.c
index 5923ca756afc..8376edd531c5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjx(op1, op2, vl);
+ return __riscv_vfsgnjx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjx(mask, op1, op2, vl);
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down.c
index 68fb93fd497f..87df7b84ca78 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4(vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4(vfloat16mf4_t src, _Float16 value, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2(vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2(vfloat16mf2_t src, _Float16 value, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, size_t vl) {
- return vfslide1down(src, value, vl);
+ return __riscv_vfslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_m(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_m(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Flo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_m(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Floa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_m(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Floa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Floa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, f
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, floa
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, floa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, doub
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, doub
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, doub
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1down(mask, src, value, vl);
+ return __riscv_vfslide1down(mask, src, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up.c
index cbfdd1d91f58..65aa66962063 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1up_vf_f16mf4(vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4(vfloat16mf4_t src, _Float16 value, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1up_vf_f16mf2(vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2(vfloat16mf2_t src, _Float16 value, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1up_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1up_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1up_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1up_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, size_t vl) {
- return vfslide1up(src, value, vl);
+ return __riscv_vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_m(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_m(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Fl
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Fl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_m(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_m(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, flo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, double
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1up(mask, src, value, vl);
+ return __riscv_vfslide1up(mask, src, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c
index eede089062c9..3aa773373935 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
- return vfsqrt(op1, vl);
+ return __riscv_vfsqrt(op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_m(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_m(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_m(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_m(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_m(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_m(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
- return vfsqrt(mask, op1, vl);
+ return __riscv_vfsqrt(mask, op1, vl);
}
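
Every file in this patch follows the same mechanical rewrite: the overloaded intrinsic call gains the `__riscv_` prefix while its argument list and semantics are unchanged. A minimal caller-side sketch of what user code looks like after the rename, assuming the standard `<riscv_vector.h>` header and a hypothetical helper name (not part of this patch):

#include <riscv_vector.h>

// Overloaded RVV intrinsics now use the __riscv_ prefix; the compiler
// still resolves the overload from the vector operand types.
vfloat32m1_t scaled_diff(vfloat32m1_t a, vfloat32m1_t b, float s, size_t vl) {
  vfloat32m1_t d = __riscv_vfsub(a, b, vl); // d[i] = a[i] - b[i]
  return __riscv_vfmul(d, s, vl);           // d[i] * s, vf (vector-scalar) form
}

The unprefixed spellings (`vfsub(...)`, `vfmul(...)`) are what these tests exercised before the patch; the updated CHECK lines verify that only the prefixed overloads are emitted now.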
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub.c
index 5a6361617fd5..3109981bbea0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
- return vfsub(op1, op2, vl);
+ return __riscv_vfsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_m(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_m(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_m(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_m(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_m(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_m(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_m(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_m(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_m(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_m(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_m(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_m(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m(
@@ -544,6 +544,6 @@ vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsub(mask, op1, op2, vl);
+ return __riscv_vfsub(mask, op1, op2, vl);
}
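For context on the rename mechanics shown above: overloaded calls keep their argument-driven dispatch, only the spelling gains the `__riscv_` prefix. The following is a minimal usage sketch, not part of this patch; the helper name vec_sub_f32 and the strip-mining loop are illustrative assumptions, while the intrinsics themselves are the renamed overloaded forms exercised by this test file.

#include <riscv_vector.h>

// Hypothetical helper (not from this patch): dst[i] = a[i] - b[i].
// The vfloat32m1_t operand types select vfsub_vv_f32m1 through the
// overloaded __riscv_vfsub entry point introduced by this commit.
void vec_sub_f32(float *dst, const float *a, const float *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);            // elements this strip
    vfloat32m1_t va = __riscv_vle32_v_f32m1(a + i, vl); // loads are non-overloaded
    vfloat32m1_t vb = __riscv_vle32_v_f32m1(b + i, vl);
    vfloat32m1_t vr = __riscv_vfsub(va, vb, vl);        // renamed overloaded call
    __riscv_vse32(dst + i, vr, vl);                     // overloaded store
    i += vl;
  }
}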
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd.c
index b8d1c3854408..201ed73f9de1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_vv(op1, op2, vl);
+ return __riscv_vfwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(op1, op2, vl);
+ return __riscv_vfwadd_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_wv(op1, op2, vl);
+ return __riscv_vfwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2(
@@ -40,7 +40,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(op1, op2, vl);
+ return __riscv_vfwadd_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1(
@@ -49,7 +49,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_vv(op1, op2, vl);
+ return __riscv_vfwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1(
@@ -58,7 +58,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(op1, op2, vl);
+ return __riscv_vfwadd_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_wv(op1, op2, vl);
+ return __riscv_vfwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(op1, op2, vl);
+ return __riscv_vfwadd_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_vv(op1, op2, vl);
+ return __riscv_vfwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(op1, op2, vl);
+ return __riscv_vfwadd_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2(
@@ -103,7 +103,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_wv(op1, op2, vl);
+ return __riscv_vfwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2(
@@ -112,7 +112,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(op1, op2, vl);
+ return __riscv_vfwadd_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4(
@@ -121,7 +121,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_vv(op1, op2, vl);
+ return __riscv_vfwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4(
@@ -130,7 +130,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(op1, op2, vl);
+ return __riscv_vfwadd_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4(
@@ -139,7 +139,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_wv(op1, op2, vl);
+ return __riscv_vfwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4(
@@ -148,7 +148,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(op1, op2, vl);
+ return __riscv_vfwadd_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8(
@@ -157,7 +157,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_vv(op1, op2, vl);
+ return __riscv_vfwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8(
@@ -166,7 +166,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(op1, op2, vl);
+ return __riscv_vfwadd_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8(
@@ -175,7 +175,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_wv(op1, op2, vl);
+ return __riscv_vfwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8(
@@ -184,7 +184,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(op1, op2, vl);
+ return __riscv_vfwadd_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
@@ -193,7 +193,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_vv(op1, op2, vl);
+ return __riscv_vfwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
@@ -202,7 +202,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwadd_vf(op1, op2, vl);
+ return __riscv_vfwadd_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
@@ -211,7 +211,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_wv(op1, op2, vl);
+ return __riscv_vfwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
- return vfwadd_wf(op1, op2, vl);
+ return __riscv_vfwadd_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_vv(op1, op2, vl);
+ return __riscv_vfwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
- return vfwadd_vf(op1, op2, vl);
+ return __riscv_vfwadd_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_wv(op1, op2, vl);
+ return __riscv_vfwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
@@ -256,7 +256,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
- return vfwadd_wf(op1, op2, vl);
+ return __riscv_vfwadd_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_vv(op1, op2, vl);
+ return __riscv_vfwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
- return vfwadd_vf(op1, op2, vl);
+ return __riscv_vfwadd_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
@@ -283,7 +283,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_wv(op1, op2, vl);
+ return __riscv_vfwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
@@ -292,7 +292,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
- return vfwadd_wf(op1, op2, vl);
+ return __riscv_vfwadd_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
@@ -301,7 +301,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_vv(op1, op2, vl);
+ return __riscv_vfwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
- return vfwadd_vf(op1, op2, vl);
+ return __riscv_vfwadd_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
@@ -319,7 +319,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_wv(op1, op2, vl);
+ return __riscv_vfwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
- return vfwadd_wf(op1, op2, vl);
+ return __riscv_vfwadd_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_m(
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_vv(mask, op1, op2, vl);
+ return __riscv_vfwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_m(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(mask, op1, op2, vl);
+ return __riscv_vfwadd_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_m(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_wv(mask, op1, op2, vl);
+ return __riscv_vfwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_m(
@@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(mask, op1, op2, vl);
+ return __riscv_vfwadd_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_m(
@@ -373,7 +373,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_vv(mask, op1, op2, vl);
+ return __riscv_vfwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_m(
@@ -382,7 +382,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(mask, op1, op2, vl);
+ return __riscv_vfwadd_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_m(
@@ -391,7 +391,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_wv(mask, op1, op2, vl);
+ return __riscv_vfwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_m(
@@ -400,7 +400,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(mask, op1, op2, vl);
+ return __riscv_vfwadd_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_m(
@@ -409,7 +409,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_vv(mask, op1, op2, vl);
+ return __riscv_vfwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_m(
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(mask, op1, op2, vl);
+ return __riscv_vfwadd_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_wv(mask, op1, op2, vl);
+ return __riscv_vfwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(mask, op1, op2, vl);
+ return __riscv_vfwadd_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_vv(mask, op1, op2, vl);
+ return __riscv_vfwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(mask, op1, op2, vl);
+ return __riscv_vfwadd_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_wv(mask, op1, op2, vl);
+ return __riscv_vfwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_m(
@@ -472,7 +472,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(mask, op1, op2, vl);
+ return __riscv_vfwadd_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_m(
@@ -481,7 +481,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_vv(mask, op1, op2, vl);
+ return __riscv_vfwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_m(
@@ -490,7 +490,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf(mask, op1, op2, vl);
+ return __riscv_vfwadd_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_m(
@@ -499,7 +499,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_wv(mask, op1, op2, vl);
+ return __riscv_vfwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_m(
@@ -508,7 +508,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf(mask, op1, op2, vl);
+ return __riscv_vfwadd_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_vv(mask, op1, op2, vl);
+ return __riscv_vfwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwadd_vf(mask, op1, op2, vl);
+ return __riscv_vfwadd_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m(
@@ -535,7 +535,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_wv(mask, op1, op2, vl);
+ return __riscv_vfwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m(
@@ -544,7 +544,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwadd_wf(mask, op1, op2, vl);
+ return __riscv_vfwadd_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m(
@@ -553,7 +553,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_vv(mask, op1, op2, vl);
+ return __riscv_vfwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m(
@@ -562,7 +562,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwadd_vf(mask, op1, op2, vl);
+ return __riscv_vfwadd_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m(
@@ -571,7 +571,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_wv(mask, op1, op2, vl);
+ return __riscv_vfwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m(
@@ -580,7 +580,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwadd_wf(mask, op1, op2, vl);
+ return __riscv_vfwadd_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m(
@@ -589,7 +589,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_vv(mask, op1, op2, vl);
+ return __riscv_vfwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m(
@@ -598,7 +598,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwadd_vf(mask, op1, op2, vl);
+ return __riscv_vfwadd_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m(
@@ -607,7 +607,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_wv(mask, op1, op2, vl);
+ return __riscv_vfwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m(
@@ -616,7 +616,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwadd_wf(mask, op1, op2, vl);
+ return __riscv_vfwadd_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m(
@@ -625,7 +625,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_vv(mask, op1, op2, vl);
+ return __riscv_vfwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m(
@@ -634,7 +634,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwadd_vf(mask, op1, op2, vl);
+ return __riscv_vfwadd_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m(
@@ -643,7 +643,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_wv(mask, op1, op2, vl);
+ return __riscv_vfwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m(
@@ -652,6 +652,6 @@ vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwadd_wf(mask, op1, op2, vl);
+ return __riscv_vfwadd_wf(mask, op1, op2, vl);
}
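Note that for the widening adds above, the overloaded names retain the operand-form suffix (_vv, _vf, _wv, _wf): the operand shapes, not the result type, select the form, so the rename yields __riscv_vfwadd_vv and friends rather than a bare __riscv_vfwadd. A hedged sketch, not part of this patch; the helper name widen_add_f32 is hypothetical:

#include <riscv_vector.h>

// Hypothetical sketch (not from this patch): widening add f32 + f32 -> f64.
// Matches the f64m2-from-two-f32m1 shape tested above; the result occupies
// twice the register group (LMUL=2) of its f32m1 sources.
void widen_add_f32(double *dst, const float *a, const float *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vfloat32m1_t va = __riscv_vle32_v_f32m1(a + i, vl);
    vfloat32m1_t vb = __riscv_vle32_v_f32m1(b + i, vl);
    vfloat64m2_t vr = __riscv_vfwadd_vv(va, vb, vl);  // _vv suffix kept after rename
    __riscv_vse64(dst + i, vr, vl);
    i += vl;
  }
}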
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c
index 4c2b0db95a22..ab35d62f7351 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2(
@@ -76,7 +76,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1(
@@ -85,7 +85,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2(
@@ -94,7 +94,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4(
@@ -103,7 +103,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8(
@@ -112,7 +112,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) {
- return vfwcvt_x(src, vl);
+ return __riscv_vfwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2(
@@ -130,7 +130,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_x(src, vl);
+ return __riscv_vfwcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1(
@@ -139,7 +139,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) {
- return vfwcvt_x(src, vl);
+ return __riscv_vfwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1(
@@ -148,7 +148,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_x(src, vl);
+ return __riscv_vfwcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2(
@@ -157,7 +157,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) {
- return vfwcvt_x(src, vl);
+ return __riscv_vfwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2(
@@ -166,7 +166,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_x(src, vl);
+ return __riscv_vfwcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4(
@@ -175,7 +175,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) {
- return vfwcvt_x(src, vl);
+ return __riscv_vfwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4(
@@ -184,7 +184,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_x(src, vl);
+ return __riscv_vfwcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8(
@@ -193,7 +193,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) {
- return vfwcvt_x(src, vl);
+ return __riscv_vfwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8(
@@ -202,7 +202,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_x(src, vl);
+ return __riscv_vfwcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2(
@@ -211,7 +211,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) {
- return vfwcvt_xu(src, vl);
+ return __riscv_vfwcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2(
@@ -220,7 +220,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_xu(src, vl);
+ return __riscv_vfwcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1(
@@ -229,7 +229,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) {
- return vfwcvt_xu(src, vl);
+ return __riscv_vfwcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1(
@@ -238,7 +238,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu(src, vl);
+ return __riscv_vfwcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2(
@@ -247,7 +247,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) {
- return vfwcvt_xu(src, vl);
+ return __riscv_vfwcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2(
@@ -256,7 +256,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_xu(src, vl);
+ return __riscv_vfwcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4(
@@ -265,7 +265,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) {
- return vfwcvt_xu(src, vl);
+ return __riscv_vfwcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4(
@@ -274,7 +274,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_xu(src, vl);
+ return __riscv_vfwcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8(
@@ -283,7 +283,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) {
- return vfwcvt_xu(src, vl);
+ return __riscv_vfwcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8(
@@ -292,7 +292,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_xu(src, vl);
+ return __riscv_vfwcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
@@ -301,7 +301,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
@@ -310,7 +310,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
@@ -319,7 +319,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
@@ -328,7 +328,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
@@ -337,7 +337,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
@@ -346,7 +346,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
@@ -364,7 +364,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
@@ -373,7 +373,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
@@ -382,7 +382,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2(
@@ -391,7 +391,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2(
@@ -409,7 +409,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4(
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8(
@@ -427,7 +427,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
@@ -436,7 +436,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
- return vfwcvt_x(src, vl);
+ return __riscv_vfwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
@@ -445,7 +445,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_x(src, vl);
+ return __riscv_vfwcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
@@ -454,7 +454,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
- return vfwcvt_x(src, vl);
+ return __riscv_vfwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
@@ -463,7 +463,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_x(src, vl);
+ return __riscv_vfwcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
@@ -472,7 +472,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
- return vfwcvt_x(src, vl);
+ return __riscv_vfwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
@@ -481,7 +481,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_x(src, vl);
+ return __riscv_vfwcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
@@ -490,7 +490,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
- return vfwcvt_x(src, vl);
+ return __riscv_vfwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
@@ -499,7 +499,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_x(src, vl);
+ return __riscv_vfwcvt_rtz_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
@@ -508,7 +508,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
- return vfwcvt_xu(src, vl);
+ return __riscv_vfwcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
@@ -517,7 +517,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu(src, vl);
+ return __riscv_vfwcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
@@ -526,7 +526,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
- return vfwcvt_xu(src, vl);
+ return __riscv_vfwcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
@@ -535,7 +535,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_xu(src, vl);
+ return __riscv_vfwcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
@@ -544,7 +544,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
- return vfwcvt_xu(src, vl);
+ return __riscv_vfwcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
@@ -553,7 +553,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_xu(src, vl);
+ return __riscv_vfwcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
@@ -562,7 +562,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
- return vfwcvt_xu(src, vl);
+ return __riscv_vfwcvt_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
@@ -571,7 +571,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_xu(src, vl);
+ return __riscv_vfwcvt_rtz_xu(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
@@ -580,7 +580,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
@@ -589,7 +589,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
@@ -598,7 +598,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
@@ -607,7 +607,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
@@ -616,7 +616,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
@@ -625,7 +625,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
@@ -634,7 +634,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
@@ -643,7 +643,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
@@ -652,7 +652,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
@@ -661,7 +661,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
@@ -670,7 +670,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
@@ -679,7 +679,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
- return vfwcvt_f(src, vl);
+ return __riscv_vfwcvt_f(src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_m(
@@ -688,7 +688,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_m(
@@ -697,7 +697,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vint8mf8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_m(
@@ -706,7 +706,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vint8mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_m(
@@ -715,7 +715,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_m(
@@ -724,7 +724,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vint8m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_m(
@@ -733,7 +733,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vint8m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_m(
@@ -742,7 +742,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vint8m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_m(
@@ -751,7 +751,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint8mf8_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_m(
@@ -760,7 +760,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint8mf4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_m(
@@ -769,7 +769,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint8mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_m(
@@ -778,7 +778,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_m(
@@ -787,7 +787,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_m(
@@ -796,7 +796,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_x(mask, src, vl);
+ return __riscv_vfwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m(
@@ -805,7 +805,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_x(mask, src, vl);
+ return __riscv_vfwcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m(
@@ -814,7 +814,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_x(mask, src, vl);
+ return __riscv_vfwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m(
@@ -823,7 +823,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_x(mask, src, vl);
+ return __riscv_vfwcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m(
@@ -832,7 +832,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfwcvt_x(mask, src, vl);
+ return __riscv_vfwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m(
@@ -841,7 +841,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_x(mask, src, vl);
+ return __riscv_vfwcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m(
@@ -850,7 +850,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfwcvt_x(mask, src, vl);
+ return __riscv_vfwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m(
@@ -859,7 +859,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_x(mask, src, vl);
+ return __riscv_vfwcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m(
@@ -868,7 +868,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfwcvt_x(mask, src, vl);
+ return __riscv_vfwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m(
@@ -877,7 +877,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_x(mask, src, vl);
+ return __riscv_vfwcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m(
@@ -886,7 +886,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_xu(mask, src, vl);
+ return __riscv_vfwcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m(
@@ -895,7 +895,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfwcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m(
@@ -904,7 +904,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_xu(mask, src, vl);
+ return __riscv_vfwcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m(
@@ -913,7 +913,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfwcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_m(
@@ -922,7 +922,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfwcvt_xu(mask, src, vl);
+ return __riscv_vfwcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m(
@@ -931,7 +931,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfwcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m(
@@ -940,7 +940,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfwcvt_xu(mask, src, vl);
+ return __riscv_vfwcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m(
@@ -949,7 +949,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfwcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m(
@@ -958,7 +958,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfwcvt_xu(mask, src, vl);
+ return __riscv_vfwcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m(
@@ -967,7 +967,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfwcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m(
@@ -976,7 +976,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m(
@@ -985,7 +985,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vint16mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m(
@@ -994,7 +994,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vint16mf2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m(
@@ -1003,7 +1003,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vint16m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m(
@@ -1012,7 +1012,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vint16m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vint16m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m(
@@ -1030,7 +1030,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint16mf4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m(
@@ -1039,7 +1039,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint16mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m(
@@ -1048,7 +1048,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint16m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m(
@@ -1057,7 +1057,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint16m2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_m(
@@ -1066,7 +1066,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint16m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_m(
@@ -1075,7 +1075,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat16mf4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_m(
@@ -1084,7 +1084,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_m(
@@ -1093,7 +1093,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat16m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_m(
@@ -1102,7 +1102,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat16m2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m(
@@ -1111,7 +1111,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat16m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_x(mask, src, vl);
+ return __riscv_vfwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m(
@@ -1120,7 +1120,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_x(mask, src, vl);
+ return __riscv_vfwcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m(
@@ -1129,7 +1129,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfwcvt_x(mask, src, vl);
+ return __riscv_vfwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m(
@@ -1138,7 +1138,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_x(mask, src, vl);
+ return __riscv_vfwcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m(
@@ -1147,7 +1147,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfwcvt_x(mask, src, vl);
+ return __riscv_vfwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m(
@@ -1156,7 +1156,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_x(mask, src, vl);
+ return __riscv_vfwcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m(
@@ -1165,7 +1165,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfwcvt_x(mask, src, vl);
+ return __riscv_vfwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m(
@@ -1174,7 +1174,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_x(mask, src, vl);
+ return __riscv_vfwcvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m(
@@ -1183,7 +1183,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_xu(mask, src, vl);
+ return __riscv_vfwcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m(
@@ -1192,7 +1192,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfwcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m(
@@ -1201,7 +1201,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfwcvt_xu(mask, src, vl);
+ return __riscv_vfwcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m(
@@ -1210,7 +1210,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfwcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m(
@@ -1219,7 +1219,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfwcvt_xu(mask, src, vl);
+ return __riscv_vfwcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m(
@@ -1228,7 +1228,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfwcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m(
@@ -1237,7 +1237,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfwcvt_xu(mask, src, vl);
+ return __riscv_vfwcvt_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m(
@@ -1246,7 +1246,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_xu(mask, src, vl);
+ return __riscv_vfwcvt_rtz_xu(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m(
@@ -1255,7 +1255,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m(
@@ -1264,7 +1264,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vint32mf2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m(
@@ -1273,7 +1273,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vint32m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m(
@@ -1282,7 +1282,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vint32m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m(
@@ -1291,7 +1291,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vint32m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m(
@@ -1300,7 +1300,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m(
@@ -1309,7 +1309,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m(
@@ -1318,7 +1318,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m(
@@ -1327,7 +1327,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint32m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m(
@@ -1336,7 +1336,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m(
@@ -1345,7 +1345,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m(
@@ -1354,6 +1354,6 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return vfwcvt_f(mask, src, vl);
+ return __riscv_vfwcvt_f(mask, src, vl);
}
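For illustration only (not part of the patch): a minimal caller sketch of how user code picks up the rename applied in the vfwcvt.c tests above. It assumes a translation unit with the RVV intrinsics available via <riscv_vector.h> and a vector-enabled target such as -march=rv64gcv; the helper name widen_f32_to_f64 is hypothetical.

#include <riscv_vector.h>

// Widen each f32 element of src to f64 for the first vl elements. Only the
// intrinsic name changes with this patch; the overloaded argument list is
// unchanged (previously spelled vfwcvt_f(src, vl)).
vfloat64m2_t widen_f32_to_f64(vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_f(src, vl);
}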
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc.c
index 64f4edfbbca2..d2a4663d9282 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_m(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_m(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_m(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_m(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_m(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_m(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_m(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
@@ -328,6 +328,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc(mask, vd, vs1, vs2, vl);
}
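Likewise for illustration (not part of the patch): a hedged sketch of a caller of the renamed widening multiply-accumulate exercised above, which computes vd[i] += vs1[i] * vs2[i] with the f32 sources widened into the f64 accumulator. Same assumptions as the previous sketch; the helper name fused_widen_mac is hypothetical.

#include <riscv_vector.h>

// vd accumulates the widened product of vs1 and vs2 for the first vl elements
// (previously spelled vfwmacc(vd, vs1, vs2, vl)).
vfloat64m1_t fused_widen_mac(vfloat64m1_t vd, vfloat32mf2_t vs1,
                             vfloat32mf2_t vs2, size_t vl) {
  return __riscv_vfwmacc(vd, vs1, vs2, vl);
}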
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac.c
index fac65679724e..c3a247bd12df 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_m(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_m(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_m(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_m(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_m(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_m(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_m(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
@@ -328,6 +328,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac(mask, vd, vs1, vs2, vl);
}
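For reference, a minimal usage sketch of the renamed overload exercised by the vfwmsac tests above. This is my illustration, not part of the patch; it assumes a toolchain that ships <riscv_vector.h> and targets the V extension (e.g. -march=rv64gcv), and it mirrors the f32 -> f64 operand types of test_vfwmsac_vv_f64m1. Per my reading of the RVV spec, vfwmsac computes vd = (vs1 * vs2) - vd with a widened accumulator.

#include <riscv_vector.h>
#include <stddef.h>

/* Strip-mined loop: acc[i] = a[i]*b[i] - acc[i], accumulating in double. */
void widening_msac(double *acc, const float *a, const float *b, size_t n) {
  for (size_t vl; n > 0; n -= vl, acc += vl, a += vl, b += vl) {
    vl = __riscv_vsetvl_e32mf2(n);
    vfloat32mf2_t va = __riscv_vle32_v_f32mf2(a, vl);
    vfloat32mf2_t vb = __riscv_vle32_v_f32mf2(b, vl);
    vfloat64m1_t vd = __riscv_vle64_v_f64m1(acc, vl);
    vd = __riscv_vfwmsac(vd, va, vb, vl); /* overloaded form under test */
    __riscv_vse64_v_f64m1(acc, vd, vl);
  }
}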
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul.c
index dcf6eb9d836c..c25b1f9f1ef6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
- return vfwmul(op1, op2, vl);
+ return __riscv_vfwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_m(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_m(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_m(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_m(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_m(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_m(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_m(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m(
@@ -328,6 +328,6 @@ vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwmul(mask, op1, op2, vl);
+ return __riscv_vfwmul(mask, op1, op2, vl);
}
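The masked vfwmul tests above differ from the unmasked ones only in the leading vbool operand; the overloaded name stays the same. A small sketch of my own (signatures copied from the tests) showing both resolutions:

#include <riscv_vector.h>

/* f32 -> f64 widening multiply via the renamed overload. */
vfloat64m1_t widen_mul(vfloat32mf2_t a, vfloat32mf2_t b, size_t vl) {
  return __riscv_vfwmul(a, b, vl);
}

/* Same overloaded name; the mask-first argument list selects the masked form. */
vfloat64m1_t widen_mul_m(vbool64_t m, vfloat32mf2_t a, vfloat32mf2_t b,
                         size_t vl) {
  return __riscv_vfwmul(m, a, b, vl);
}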
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc.c
index 2571c79a1aed..1b4d0f5e328c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_m(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_m(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_m(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_m(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_m(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_m(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_m(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
@@ -328,6 +328,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
}
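Call sites written against the old unprefixed overloads need a mechanical rename. Where that cannot land atomically, a thin shim is one option; the sketch below is my suggestion, not from the patch, and assumes Zvfh for the _Float16 element types:

#include <riscv_vector.h>
#include <stddef.h>

/* Legacy-named wrapper forwarding to the prefixed overload
   (vfwnmacc: vd = -(vs1 * vs2) - vd, per my reading of the spec). */
static inline vfloat32mf2_t
vfwnmacc_compat(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2,
                size_t vl) {
  return __riscv_vfwnmacc(vd, vs1, vs2, vl);
}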
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac.c
index bf3e838f5fff..3e039f6eb586 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_m(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_m(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_m(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_m(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_m(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_m(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_m(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_m(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_m(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_m(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m(
@@ -328,6 +328,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
}
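The vfwredosum tests below cover the ordered widening reduction. A sketch of how the renamed overload composes into a scalar sum (my example, not from the patch; __riscv_vfmv_v_f_f64m1 and __riscv_vfmv_f_s_f64m1_f64 are the standard scalar-move intrinsics, used here on the assumption the same toolchain provides them):

#include <riscv_vector.h>
#include <stddef.h>

/* Ordered widening sum: float input, double accumulator in element 0. */
double ordered_wide_sum(const float *x, size_t n) {
  vfloat64m1_t acc =
      __riscv_vfmv_v_f_f64m1(0.0, __riscv_vsetvlmax_e64m1());
  for (size_t vl; n > 0; n -= vl, x += vl) {
    vl = __riscv_vsetvl_e32m1(n);
    vfloat32m1_t vx = __riscv_vle32_v_f32m1(x, vl);
    acc = __riscv_vfwredosum(vx, acc, vl); /* element 0 carries the sum */
  }
  return __riscv_vfmv_f_s_f64m1_f64(acc);
}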
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c
index f862ee7349b0..58f59e5e4c94 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1(
@@ -22,7 +22,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1(
@@ -31,7 +31,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1(
@@ -58,7 +58,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
@@ -76,7 +76,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
@@ -85,7 +85,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
@@ -94,7 +94,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
@@ -103,7 +103,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(vector, scalar, vl);
+ return __riscv_vfwredosum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m(
@@ -121,7 +121,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m(
@@ -130,7 +130,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m(
@@ -139,7 +139,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
@@ -166,7 +166,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
@@ -175,7 +175,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
@@ -184,7 +184,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
@@ -193,7 +193,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
@@ -202,6 +202,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum(mask, vector, scalar, vl);
+ return __riscv_vfwredosum(mask, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c
index 2ca5ef2c0a87..3d1bed73a949 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1(
@@ -22,7 +22,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1(
@@ -31,7 +31,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1(
@@ -58,7 +58,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1(
@@ -76,7 +76,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1(
@@ -85,7 +85,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1(
@@ -94,7 +94,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1(
@@ -103,7 +103,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(vector, scalar, vl);
+ return __riscv_vfwredusum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t sc
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m(
@@ -121,7 +121,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m(
@@ -130,7 +130,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m(
@@ -139,7 +139,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m(
@@ -166,7 +166,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m(
@@ -175,7 +175,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m(
@@ -184,7 +184,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m(
@@ -193,7 +193,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m(
@@ -202,6 +202,6 @@ vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum(mask, vector, scalar, vl);
+ return __riscv_vfwredusum(mask, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub.c
index 4a9bcb93eb7a..d0b6282dcc8c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_vv(op1, op2, vl);
+ return __riscv_vfwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(op1, op2, vl);
+ return __riscv_vfwsub_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_wv(op1, op2, vl);
+ return __riscv_vfwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2(
@@ -40,7 +40,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(op1, op2, vl);
+ return __riscv_vfwsub_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1(
@@ -49,7 +49,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_vv(op1, op2, vl);
+ return __riscv_vfwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1(
@@ -58,7 +58,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(op1, op2, vl);
+ return __riscv_vfwsub_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_wv(op1, op2, vl);
+ return __riscv_vfwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(op1, op2, vl);
+ return __riscv_vfwsub_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_vv(op1, op2, vl);
+ return __riscv_vfwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(op1, op2, vl);
+ return __riscv_vfwsub_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2(
@@ -103,7 +103,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_wv(op1, op2, vl);
+ return __riscv_vfwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2(
@@ -112,7 +112,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(op1, op2, vl);
+ return __riscv_vfwsub_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4(
@@ -121,7 +121,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_vv(op1, op2, vl);
+ return __riscv_vfwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4(
@@ -130,7 +130,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(op1, op2, vl);
+ return __riscv_vfwsub_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4(
@@ -139,7 +139,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_wv(op1, op2, vl);
+ return __riscv_vfwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4(
@@ -148,7 +148,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(op1, op2, vl);
+ return __riscv_vfwsub_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8(
@@ -157,7 +157,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_vv(op1, op2, vl);
+ return __riscv_vfwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8(
@@ -166,7 +166,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(op1, op2, vl);
+ return __riscv_vfwsub_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8(
@@ -175,7 +175,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_wv(op1, op2, vl);
+ return __riscv_vfwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8(
@@ -184,7 +184,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(op1, op2, vl);
+ return __riscv_vfwsub_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
@@ -193,7 +193,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_vv(op1, op2, vl);
+ return __riscv_vfwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
@@ -202,7 +202,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwsub_vf(op1, op2, vl);
+ return __riscv_vfwsub_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
@@ -211,7 +211,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_wv(op1, op2, vl);
+ return __riscv_vfwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
- return vfwsub_wf(op1, op2, vl);
+ return __riscv_vfwsub_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_vv(op1, op2, vl);
+ return __riscv_vfwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
- return vfwsub_vf(op1, op2, vl);
+ return __riscv_vfwsub_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_wv(op1, op2, vl);
+ return __riscv_vfwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
@@ -256,7 +256,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
- return vfwsub_wf(op1, op2, vl);
+ return __riscv_vfwsub_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_vv(op1, op2, vl);
+ return __riscv_vfwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
- return vfwsub_vf(op1, op2, vl);
+ return __riscv_vfwsub_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
@@ -283,7 +283,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_wv(op1, op2, vl);
+ return __riscv_vfwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
@@ -292,7 +292,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
- return vfwsub_wf(op1, op2, vl);
+ return __riscv_vfwsub_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
@@ -301,7 +301,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_vv(op1, op2, vl);
+ return __riscv_vfwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
- return vfwsub_vf(op1, op2, vl);
+ return __riscv_vfwsub_vf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
@@ -319,7 +319,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_wv(op1, op2, vl);
+ return __riscv_vfwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
- return vfwsub_wf(op1, op2, vl);
+ return __riscv_vfwsub_wf(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_m(
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_vv(mask, op1, op2, vl);
+ return __riscv_vfwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_m(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(mask, op1, op2, vl);
+ return __riscv_vfwsub_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_m(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_wv(mask, op1, op2, vl);
+ return __riscv_vfwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_m(
@@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(mask, op1, op2, vl);
+ return __riscv_vfwsub_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_m(
@@ -373,7 +373,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_vv(mask, op1, op2, vl);
+ return __riscv_vfwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_m(
@@ -382,7 +382,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(mask, op1, op2, vl);
+ return __riscv_vfwsub_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_m(
@@ -391,7 +391,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_wv(mask, op1, op2, vl);
+ return __riscv_vfwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_m(
@@ -400,7 +400,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(mask, op1, op2, vl);
+ return __riscv_vfwsub_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_m(
@@ -409,7 +409,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_vv(mask, op1, op2, vl);
+ return __riscv_vfwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_m(
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(mask, op1, op2, vl);
+ return __riscv_vfwsub_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_m(
@@ -427,7 +427,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_wv(mask, op1, op2, vl);
+ return __riscv_vfwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_m(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(mask, op1, op2, vl);
+ return __riscv_vfwsub_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_m(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_vv(mask, op1, op2, vl);
+ return __riscv_vfwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_m(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(mask, op1, op2, vl);
+ return __riscv_vfwsub_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_m(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_wv(mask, op1, op2, vl);
+ return __riscv_vfwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_m(
@@ -472,7 +472,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(mask, op1, op2, vl);
+ return __riscv_vfwsub_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_m(
@@ -481,7 +481,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_vv(mask, op1, op2, vl);
+ return __riscv_vfwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_m(
@@ -490,7 +490,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf(mask, op1, op2, vl);
+ return __riscv_vfwsub_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_m(
@@ -499,7 +499,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_wv(mask, op1, op2, vl);
+ return __riscv_vfwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_m(
@@ -508,7 +508,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf(mask, op1, op2, vl);
+ return __riscv_vfwsub_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_vv(mask, op1, op2, vl);
+ return __riscv_vfwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwsub_vf(mask, op1, op2, vl);
+ return __riscv_vfwsub_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m(
@@ -535,7 +535,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_wv(mask, op1, op2, vl);
+ return __riscv_vfwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m(
@@ -544,7 +544,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwsub_wf(mask, op1, op2, vl);
+ return __riscv_vfwsub_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m(
@@ -553,7 +553,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_vv(mask, op1, op2, vl);
+ return __riscv_vfwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m(
@@ -562,7 +562,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwsub_vf(mask, op1, op2, vl);
+ return __riscv_vfwsub_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m(
@@ -571,7 +571,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_wv(mask, op1, op2, vl);
+ return __riscv_vfwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m(
@@ -580,7 +580,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwsub_wf(mask, op1, op2, vl);
+ return __riscv_vfwsub_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m(
@@ -589,7 +589,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_vv(mask, op1, op2, vl);
+ return __riscv_vfwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m(
@@ -598,7 +598,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwsub_vf(mask, op1, op2, vl);
+ return __riscv_vfwsub_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m(
@@ -607,7 +607,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_wv(mask, op1, op2, vl);
+ return __riscv_vfwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m(
@@ -616,7 +616,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwsub_wf(mask, op1, op2, vl);
+ return __riscv_vfwsub_wf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m(
@@ -625,7 +625,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_vv(mask, op1, op2, vl);
+ return __riscv_vfwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m(
@@ -634,7 +634,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwsub_vf(mask, op1, op2, vl);
+ return __riscv_vfwsub_vf(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m(
@@ -643,7 +643,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_wv(mask, op1, op2, vl);
+ return __riscv_vfwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m(
@@ -652,6 +652,6 @@ vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwsub_wf(mask, op1, op2, vl);
+ return __riscv_vfwsub_wf(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vget.c
index 31cd62fad416..f5c2c81c9561 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vget.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vget.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vget_v_f16m2_f16m1(vfloat16m2_t src, size_t index) {
- return vget_f16m1(src, 0);
+ return __riscv_vget_f16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m1(
@@ -22,7 +22,7 @@ vfloat16m1_t test_vget_v_f16m2_f16m1(vfloat16m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vget_v_f16m4_f16m1(vfloat16m4_t src, size_t index) {
- return vget_f16m1(src, 0);
+ return __riscv_vget_f16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m1(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vget_v_f16m4_f16m1(vfloat16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vget_v_f16m8_f16m1(vfloat16m8_t src, size_t index) {
- return vget_f16m1(src, 0);
+ return __riscv_vget_f16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vget_v_f16m8_f16m1(vfloat16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vget_v_f16m4_f16m2(vfloat16m4_t src, size_t index) {
- return vget_f16m2(src, 0);
+ return __riscv_vget_f16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m2(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vget_v_f16m4_f16m2(vfloat16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vget_v_f16m8_f16m2(vfloat16m8_t src, size_t index) {
- return vget_f16m2(src, 0);
+ return __riscv_vget_f16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m4(
@@ -58,7 +58,7 @@ vfloat16m2_t test_vget_v_f16m8_f16m2(vfloat16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vget_v_f16m8_f16m4(vfloat16m8_t src, size_t index) {
- return vget_f16m4(src, 0);
+ return __riscv_vget_f16m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1(
@@ -67,7 +67,7 @@ vfloat16m4_t test_vget_v_f16m8_f16m4(vfloat16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) {
- return vget_f32m1(src, 0);
+ return __riscv_vget_f32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) {
- return vget_f32m1(src, 0);
+ return __riscv_vget_f32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) {
- return vget_f32m1(src, 0);
+ return __riscv_vget_f32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2(
@@ -94,7 +94,7 @@ vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) {
- return vget_f32m2(src, 0);
+ return __riscv_vget_f32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2(
@@ -103,7 +103,7 @@ vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) {
- return vget_f32m2(src, 0);
+ return __riscv_vget_f32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4(
@@ -112,7 +112,7 @@ vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) {
- return vget_f32m4(src, 0);
+ return __riscv_vget_f32m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1(
@@ -121,7 +121,7 @@ vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) {
- return vget_f64m1(src, 0);
+ return __riscv_vget_f64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) {
- return vget_f64m1(src, 0);
+ return __riscv_vget_f64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1(
@@ -139,7 +139,7 @@ vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) {
- return vget_f64m1(src, 0);
+ return __riscv_vget_f64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2(
@@ -148,7 +148,7 @@ vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) {
- return vget_f64m2(src, 0);
+ return __riscv_vget_f64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2(
@@ -157,7 +157,7 @@ vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) {
- return vget_f64m2(src, 0);
+ return __riscv_vget_f64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4(
@@ -166,7 +166,7 @@ vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) {
- return vget_f64m4(src, 0);
+ return __riscv_vget_f64m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
@@ -175,7 +175,7 @@ vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
- return vget_i8m1(src, 0);
+ return __riscv_vget_i8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
@@ -184,7 +184,7 @@ vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
- return vget_i8m1(src, 0);
+ return __riscv_vget_i8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
@@ -193,7 +193,7 @@ vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
- return vget_i8m1(src, 0);
+ return __riscv_vget_i8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
@@ -202,7 +202,7 @@ vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
- return vget_i8m2(src, 0);
+ return __riscv_vget_i8m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
@@ -211,7 +211,7 @@ vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
- return vget_i8m2(src, 0);
+ return __riscv_vget_i8m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
@@ -220,7 +220,7 @@ vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) {
- return vget_i8m4(src, 0);
+ return __riscv_vget_i8m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1(
@@ -229,7 +229,7 @@ vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) {
- return vget_i16m1(src, 0);
+ return __riscv_vget_i16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1(
@@ -238,7 +238,7 @@ vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) {
- return vget_i16m1(src, 0);
+ return __riscv_vget_i16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1(
@@ -247,7 +247,7 @@ vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) {
- return vget_i16m1(src, 0);
+ return __riscv_vget_i16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2(
@@ -256,7 +256,7 @@ vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) {
- return vget_i16m2(src, 0);
+ return __riscv_vget_i16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2(
@@ -265,7 +265,7 @@ vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) {
- return vget_i16m2(src, 0);
+ return __riscv_vget_i16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4(
@@ -274,7 +274,7 @@ vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) {
- return vget_i16m4(src, 0);
+ return __riscv_vget_i16m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1(
@@ -283,7 +283,7 @@ vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) {
- return vget_i32m1(src, 0);
+ return __riscv_vget_i32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1(
@@ -292,7 +292,7 @@ vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) {
- return vget_i32m1(src, 0);
+ return __riscv_vget_i32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1(
@@ -301,7 +301,7 @@ vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) {
- return vget_i32m1(src, 0);
+ return __riscv_vget_i32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2(
@@ -310,7 +310,7 @@ vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) {
- return vget_i32m2(src, 0);
+ return __riscv_vget_i32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2(
@@ -319,7 +319,7 @@ vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) {
- return vget_i32m2(src, 0);
+ return __riscv_vget_i32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4(
@@ -328,7 +328,7 @@ vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) {
- return vget_i32m4(src, 0);
+ return __riscv_vget_i32m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1(
@@ -337,7 +337,7 @@ vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) {
- return vget_i64m1(src, 0);
+ return __riscv_vget_i64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1(
@@ -346,7 +346,7 @@ vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) {
- return vget_i64m1(src, 0);
+ return __riscv_vget_i64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1(
@@ -355,7 +355,7 @@ vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) {
- return vget_i64m1(src, 0);
+ return __riscv_vget_i64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2(
@@ -364,7 +364,7 @@ vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) {
- return vget_i64m2(src, 0);
+ return __riscv_vget_i64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2(
@@ -373,7 +373,7 @@ vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) {
- return vget_i64m2(src, 0);
+ return __riscv_vget_i64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4(
@@ -382,7 +382,7 @@ vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) {
- return vget_i64m4(src, 0);
+ return __riscv_vget_i64m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1(
@@ -391,7 +391,7 @@ vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) {
- return vget_u8m1(src, 0);
+ return __riscv_vget_u8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1(
@@ -400,7 +400,7 @@ vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) {
- return vget_u8m1(src, 0);
+ return __riscv_vget_u8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1(
@@ -409,7 +409,7 @@ vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) {
- return vget_u8m1(src, 0);
+ return __riscv_vget_u8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2(
@@ -418,7 +418,7 @@ vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) {
- return vget_u8m2(src, 0);
+ return __riscv_vget_u8m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2(
@@ -427,7 +427,7 @@ vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) {
- return vget_u8m2(src, 0);
+ return __riscv_vget_u8m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4(
@@ -436,7 +436,7 @@ vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) {
- return vget_u8m4(src, 0);
+ return __riscv_vget_u8m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1(
@@ -445,7 +445,7 @@ vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) {
- return vget_u16m1(src, 0);
+ return __riscv_vget_u16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1(
@@ -454,7 +454,7 @@ vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) {
- return vget_u16m1(src, 0);
+ return __riscv_vget_u16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1(
@@ -463,7 +463,7 @@ vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) {
- return vget_u16m1(src, 0);
+ return __riscv_vget_u16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2(
@@ -472,7 +472,7 @@ vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) {
- return vget_u16m2(src, 0);
+ return __riscv_vget_u16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2(
@@ -481,7 +481,7 @@ vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) {
- return vget_u16m2(src, 0);
+ return __riscv_vget_u16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4(
@@ -490,7 +490,7 @@ vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) {
- return vget_u16m4(src, 0);
+ return __riscv_vget_u16m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1(
@@ -499,7 +499,7 @@ vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) {
- return vget_u32m1(src, 0);
+ return __riscv_vget_u32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1(
@@ -508,7 +508,7 @@ vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) {
- return vget_u32m1(src, 0);
+ return __riscv_vget_u32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1(
@@ -517,7 +517,7 @@ vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) {
- return vget_u32m1(src, 0);
+ return __riscv_vget_u32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2(
@@ -526,7 +526,7 @@ vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) {
- return vget_u32m2(src, 0);
+ return __riscv_vget_u32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2(
@@ -535,7 +535,7 @@ vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) {
- return vget_u32m2(src, 0);
+ return __riscv_vget_u32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4(
@@ -544,7 +544,7 @@ vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) {
- return vget_u32m4(src, 0);
+ return __riscv_vget_u32m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1(
@@ -553,7 +553,7 @@ vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) {
- return vget_u64m1(src, 0);
+ return __riscv_vget_u64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1(
@@ -562,7 +562,7 @@ vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) {
- return vget_u64m1(src, 0);
+ return __riscv_vget_u64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1(
@@ -571,7 +571,7 @@ vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) {
- return vget_u64m1(src, 0);
+ return __riscv_vget_u64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2(
@@ -580,7 +580,7 @@ vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) {
- return vget_u64m2(src, 0);
+ return __riscv_vget_u64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2(
@@ -589,7 +589,7 @@ vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) {
- return vget_u64m2(src, 0);
+ return __riscv_vget_u64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4(
@@ -598,6 +598,6 @@ vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
- return vget_u64m4(src, 0);
+ return __riscv_vget_u64m4(src, 0);
}
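
The vget hunks above are purely mechanical: the overloaded extract intrinsic keeps its argument list and semantics and only gains the __riscv_ prefix. A minimal caller sketch, assuming a RISC-V target with the V extension enabled (the helper name is illustrative, not from the patch):

#include <riscv_vector.h>

// Extract the first m2 part of an m8 register group. After this patch the
// overloaded form is spelled with the __riscv_ prefix; the old spelling is
// kept in a comment for contrast.
vint32m2_t first_quarter(vint32m8_t grouped) {
  // was: return vget_i32m2(grouped, 0);
  return __riscv_vget_i32m2(grouped, 0);
}
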
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16.c
index f3893ffb015f..2a3ee4cab678 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m1_m(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m2_m(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m4_m(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m8_m(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m(
@@ -76,7 +76,7 @@ vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m1_m(
@@ -85,7 +85,7 @@ vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m2_m(
@@ -94,7 +94,7 @@ vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m4_m(
@@ -103,7 +103,7 @@ vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m8_m(
@@ -112,7 +112,7 @@ vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m(
@@ -121,7 +121,7 @@ vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m(
@@ -130,7 +130,7 @@ vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m1_m(
@@ -139,7 +139,7 @@ vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m2_m(
@@ -148,7 +148,7 @@ vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m4_m(
@@ -157,7 +157,7 @@ vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m8_m(
@@ -166,6 +166,6 @@ vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t vl) {
- return vle16(mask, base, vl);
+ return __riscv_vle16(mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16ff.c
index ada915964caa..afb7525b7b41 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16ff.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP1]]
//
vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_m(
@@ -28,7 +28,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP1]]
//
vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_m(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP1]]
//
vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_m(
@@ -52,7 +52,7 @@ vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_m(
@@ -64,7 +64,7 @@ vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP1]]
//
vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_m(
@@ -76,7 +76,7 @@ vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
//
vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_m(
@@ -88,7 +88,7 @@ vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_m(
@@ -100,7 +100,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_m(
@@ -112,7 +112,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_m(
@@ -124,7 +124,7 @@ vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_m(
@@ -136,7 +136,7 @@ vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t *ne
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_m(
@@ -148,7 +148,7 @@ vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t *ne
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_m(
@@ -160,7 +160,7 @@ vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t *ne
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_m(
@@ -172,7 +172,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_m(
@@ -184,7 +184,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_m(
@@ -196,7 +196,7 @@ vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_m(
@@ -208,7 +208,7 @@ vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t *
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_m(
@@ -220,6 +220,6 @@ vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t *
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff(mask, base, new_vl, vl);
+ return __riscv_vle16ff(mask, base, new_vl, vl);
}
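
For the fault-only-first loads the rename likewise leaves the contract untouched: the call still reports through new_vl how many elements were actually loaded. A hedged usage sketch (function name invented for illustration):

#include <riscv_vector.h>

// Masked fault-only-first load of up to vl halfwords; *loaded receives the
// element count actually read before a trap would have been taken. Only the
// intrinsic's spelling differs from the pre-patch code.
vint16m1_t load_prefix(vbool16_t mask, const int16_t *base,
                       size_t *loaded, size_t vl) {
  // was: return vle16ff(mask, base, loaded, vl);
  return __riscv_vle16ff(mask, base, loaded, vl);
}
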
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32.c
index a9179c9ce2d7..a2842843ea66 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, const float *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m1_m(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, const float *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, const float *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m2_m(
@@ -31,7 +31,7 @@ vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, const float *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, const float *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m4_m(
@@ -40,7 +40,7 @@ vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, const float *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, const float *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m8_m(
@@ -49,7 +49,7 @@ vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, const float *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, const float *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m(
@@ -58,7 +58,7 @@ vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, const float *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m1_m(
@@ -67,7 +67,7 @@ vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m2_m(
@@ -76,7 +76,7 @@ vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m4_m(
@@ -85,7 +85,7 @@ vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m8_m(
@@ -94,7 +94,7 @@ vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m(
@@ -103,7 +103,7 @@ vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m1_m(
@@ -112,7 +112,7 @@ vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m2_m(
@@ -121,7 +121,7 @@ vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m4_m(
@@ -130,7 +130,7 @@ vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m8_m(
@@ -139,6 +139,6 @@ vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t vl) {
- return vle32(mask, base, vl);
+ return __riscv_vle32(mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32ff.c
index 2322bbe3a65d..ac2fea844bb3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32ff.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
//
vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m(
@@ -28,7 +28,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t mask, const float *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
//
vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, const float *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_m(
@@ -52,7 +52,7 @@ vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, const float *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
//
vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m(
@@ -64,7 +64,7 @@ vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t mask, const float *base, size_t *ne
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
//
vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m(
@@ -76,7 +76,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, const float *base, size_t *ne
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m(
@@ -88,7 +88,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m(
@@ -100,7 +100,7 @@ vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m(
@@ -112,7 +112,7 @@ vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m(
@@ -124,7 +124,7 @@ vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t *ne
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m(
@@ -136,7 +136,7 @@ vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t *ne
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m(
@@ -148,7 +148,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m(
@@ -160,7 +160,7 @@ vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m(
@@ -172,7 +172,7 @@ vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m(
@@ -184,6 +184,6 @@ vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t *
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff(mask, base, new_vl, vl);
+ return __riscv_vle32ff(mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64.c
index a3352fbbc1bf..420b4df650ba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, const double *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m2_m(
@@ -22,7 +22,7 @@ vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, const double *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, const double *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m4_m(
@@ -31,7 +31,7 @@ vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, const double *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, const double *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m8_m(
@@ -40,7 +40,7 @@ vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, const double *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, const double *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m1_m(
@@ -49,7 +49,7 @@ vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, const double *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m2_m(
@@ -58,7 +58,7 @@ vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m4_m(
@@ -67,7 +67,7 @@ vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m8_m(
@@ -76,7 +76,7 @@ vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m1_m(
@@ -85,7 +85,7 @@ vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m2_m(
@@ -94,7 +94,7 @@ vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m4_m(
@@ -103,7 +103,7 @@ vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m8_m(
@@ -112,6 +112,6 @@ vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t vl) {
- return vle64(mask, base, vl);
+ return __riscv_vle64(mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64ff.c
index d0833445c35b..4c452ae296e0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64ff.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
//
vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_m(
@@ -28,7 +28,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, const double *base, size_t *
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_m(
@@ -40,7 +40,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, const double *base, size_t *
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
//
vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_m(
@@ -52,7 +52,7 @@ vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t mask, const double *base, size_t *
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
//
vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_m(
@@ -64,7 +64,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, const double *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_m(
@@ -76,7 +76,7 @@ vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_m(
@@ -88,7 +88,7 @@ vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_m(
@@ -100,7 +100,7 @@ vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_m(
@@ -112,7 +112,7 @@ vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t *ne
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_m(
@@ -124,7 +124,7 @@ vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_m(
@@ -136,7 +136,7 @@ vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_m(
@@ -148,6 +148,6 @@ vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff(mask, base, new_vl, vl);
+ return __riscv_vle64ff(mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8.c
index 11608d18cb66..d36577843562 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m(
@@ -22,7 +22,7 @@ vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m(
@@ -31,7 +31,7 @@ vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m1_m(
@@ -40,7 +40,7 @@ vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m2_m(
@@ -49,7 +49,7 @@ vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m4_m(
@@ -58,7 +58,7 @@ vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m8_m(
@@ -67,7 +67,7 @@ vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m(
@@ -76,7 +76,7 @@ vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m(
@@ -85,7 +85,7 @@ vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m(
@@ -94,7 +94,7 @@ vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m1_m(
@@ -103,7 +103,7 @@ vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m2_m(
@@ -112,7 +112,7 @@ vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m4_m(
@@ -121,7 +121,7 @@ vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m8_m(
@@ -130,6 +130,6 @@ vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t vl) {
- return vle8(mask, base, vl);
+ return __riscv_vle8(mask, base, vl);
}
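
Note how overload resolution survives the rename unchanged: for these masked loads the element width comes from the pointer type and the SEW/LMUL ratio from the mask type, so each mask/pointer pair in the hunks above still selects exactly one return type. For example (illustrative helper, same assumptions as above):

#include <riscv_vector.h>

// const uint8_t * fixes SEW=8 and vbool1_t fixes a SEW/LMUL ratio of 1, so
// this overloaded call can only resolve to the u8m8 form.
vuint8m8_t load_bytes(vbool1_t mask, const uint8_t *base, size_t vl) {
  // was: return vle8(mask, base, vl);
  return __riscv_vle8(mask, base, vl);
}
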
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8ff.c
index c1e39816351d..e88b4ed6e69f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8ff.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_m(
@@ -28,7 +28,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t *new
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_m(
@@ -40,7 +40,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t *new
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_m(
@@ -52,7 +52,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t *new
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_m(
@@ -64,7 +64,7 @@ vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t *new_vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_m(
@@ -76,7 +76,7 @@ vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t *new_vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_m(
@@ -88,7 +88,7 @@ vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t *new_vl
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_m(
@@ -100,7 +100,7 @@ vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t *new_vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_m(
@@ -112,7 +112,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_m(
@@ -124,7 +124,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_m(
@@ -136,7 +136,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t *n
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_m(
@@ -148,7 +148,7 @@ vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t *new_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_m(
@@ -160,7 +160,7 @@ vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t *new_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_m(
@@ -172,6 +172,6 @@ vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t *new_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff(mask, base, new_vl, vl);
+ return __riscv_vle8ff(mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul.c
index 81b8b2d45113..4e8ac67627d9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t op1) {
- return vlmul_ext_f16mf2(op1);
+ return __riscv_vlmul_ext_f16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m1(
@@ -22,7 +22,7 @@ vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t op1) {
- return vlmul_ext_f16m1(op1);
+ return __riscv_vlmul_ext_f16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m2(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t op1) {
- return vlmul_ext_f16m2(op1);
+ return __riscv_vlmul_ext_f16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m4(
@@ -40,7 +40,7 @@ vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t op1) {
- return vlmul_ext_f16m4(op1);
+ return __riscv_vlmul_ext_f16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m8(
@@ -49,7 +49,7 @@ vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t op1) {
- return vlmul_ext_f16m8(op1);
+ return __riscv_vlmul_ext_f16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m1(
@@ -58,7 +58,7 @@ vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t op1) {
- return vlmul_ext_f16m1(op1);
+ return __riscv_vlmul_ext_f16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t op1) {
- return vlmul_ext_f16m2(op1);
+ return __riscv_vlmul_ext_f16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m4(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t op1) {
- return vlmul_ext_f16m4(op1);
+ return __riscv_vlmul_ext_f16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m8(
@@ -85,7 +85,7 @@ vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t op1) {
- return vlmul_ext_f16m8(op1);
+ return __riscv_vlmul_ext_f16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m2(
@@ -94,7 +94,7 @@ vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t op1) {
- return vlmul_ext_f16m2(op1);
+ return __riscv_vlmul_ext_f16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m4(
@@ -103,7 +103,7 @@ vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t op1) {
- return vlmul_ext_f16m4(op1);
+ return __riscv_vlmul_ext_f16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m8(
@@ -112,7 +112,7 @@ vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t op1) {
- return vlmul_ext_f16m8(op1);
+ return __riscv_vlmul_ext_f16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m4(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t op1) {
- return vlmul_ext_f16m4(op1);
+ return __riscv_vlmul_ext_f16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m8(
@@ -130,7 +130,7 @@ vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t op1) {
- return vlmul_ext_f16m8(op1);
+ return __riscv_vlmul_ext_f16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m4_f16m8(
@@ -139,7 +139,7 @@ vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t op1) {
- return vlmul_ext_f16m8(op1);
+ return __riscv_vlmul_ext_f16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1(
@@ -148,7 +148,7 @@ vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
- return vlmul_ext_f32m1(op1);
+ return __riscv_vlmul_ext_f32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
- return vlmul_ext_f32m2(op1);
+ return __riscv_vlmul_ext_f32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
- return vlmul_ext_f32m4(op1);
+ return __riscv_vlmul_ext_f32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8(
@@ -175,7 +175,7 @@ vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
- return vlmul_ext_f32m8(op1);
+ return __riscv_vlmul_ext_f32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2(
@@ -184,7 +184,7 @@ vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
- return vlmul_ext_f32m2(op1);
+ return __riscv_vlmul_ext_f32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4(
@@ -193,7 +193,7 @@ vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
- return vlmul_ext_f32m4(op1);
+ return __riscv_vlmul_ext_f32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8(
@@ -202,7 +202,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
- return vlmul_ext_f32m8(op1);
+ return __riscv_vlmul_ext_f32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
- return vlmul_ext_f32m4(op1);
+ return __riscv_vlmul_ext_f32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8(
@@ -220,7 +220,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
- return vlmul_ext_f32m8(op1);
+ return __riscv_vlmul_ext_f32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8(
@@ -229,7 +229,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
- return vlmul_ext_f32m8(op1);
+ return __riscv_vlmul_ext_f32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2(
@@ -238,7 +238,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
- return vlmul_ext_f64m2(op1);
+ return __riscv_vlmul_ext_f64m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
- return vlmul_ext_f64m4(op1);
+ return __riscv_vlmul_ext_f64m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
- return vlmul_ext_f64m8(op1);
+ return __riscv_vlmul_ext_f64m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4(
@@ -265,7 +265,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
- return vlmul_ext_f64m4(op1);
+ return __riscv_vlmul_ext_f64m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
- return vlmul_ext_f64m8(op1);
+ return __riscv_vlmul_ext_f64m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
- return vlmul_ext_f64m8(op1);
+ return __riscv_vlmul_ext_f64m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4(
@@ -292,7 +292,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) {
- return vlmul_ext_i8mf4(op1);
+ return __riscv_vlmul_ext_i8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2(
@@ -301,7 +301,7 @@ vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) {
- return vlmul_ext_i8mf2(op1);
+ return __riscv_vlmul_ext_i8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1(
@@ -310,7 +310,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) {
- return vlmul_ext_i8m1(op1);
+ return __riscv_vlmul_ext_i8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2(
@@ -319,7 +319,7 @@ vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) {
- return vlmul_ext_i8m2(op1);
+ return __riscv_vlmul_ext_i8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4(
@@ -328,7 +328,7 @@ vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) {
- return vlmul_ext_i8m4(op1);
+ return __riscv_vlmul_ext_i8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8(
@@ -337,7 +337,7 @@ vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) {
- return vlmul_ext_i8m8(op1);
+ return __riscv_vlmul_ext_i8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2(
@@ -346,7 +346,7 @@ vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) {
- return vlmul_ext_i8mf2(op1);
+ return __riscv_vlmul_ext_i8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1(
@@ -355,7 +355,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) {
- return vlmul_ext_i8m1(op1);
+ return __riscv_vlmul_ext_i8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2(
@@ -364,7 +364,7 @@ vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) {
- return vlmul_ext_i8m2(op1);
+ return __riscv_vlmul_ext_i8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4(
@@ -373,7 +373,7 @@ vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) {
- return vlmul_ext_i8m4(op1);
+ return __riscv_vlmul_ext_i8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8(
@@ -382,7 +382,7 @@ vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) {
- return vlmul_ext_i8m8(op1);
+ return __riscv_vlmul_ext_i8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1(
@@ -391,7 +391,7 @@ vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) {
- return vlmul_ext_i8m1(op1);
+ return __riscv_vlmul_ext_i8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2(
@@ -400,7 +400,7 @@ vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) {
- return vlmul_ext_i8m2(op1);
+ return __riscv_vlmul_ext_i8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4(
@@ -409,7 +409,7 @@ vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) {
- return vlmul_ext_i8m4(op1);
+ return __riscv_vlmul_ext_i8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8(
@@ -418,7 +418,7 @@ vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) {
- return vlmul_ext_i8m8(op1);
+ return __riscv_vlmul_ext_i8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2(
@@ -427,7 +427,7 @@ vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) {
- return vlmul_ext_i8m2(op1);
+ return __riscv_vlmul_ext_i8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4(
@@ -436,7 +436,7 @@ vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) {
- return vlmul_ext_i8m4(op1);
+ return __riscv_vlmul_ext_i8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8(
@@ -445,7 +445,7 @@ vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) {
- return vlmul_ext_i8m8(op1);
+ return __riscv_vlmul_ext_i8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4(
@@ -454,7 +454,7 @@ vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) {
- return vlmul_ext_i8m4(op1);
+ return __riscv_vlmul_ext_i8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8(
@@ -463,7 +463,7 @@ vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) {
- return vlmul_ext_i8m8(op1);
+ return __riscv_vlmul_ext_i8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8(
@@ -472,7 +472,7 @@ vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) {
- return vlmul_ext_i8m8(op1);
+ return __riscv_vlmul_ext_i8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2(
@@ -481,7 +481,7 @@ vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) {
- return vlmul_ext_i16mf2(op1);
+ return __riscv_vlmul_ext_i16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1(
@@ -490,7 +490,7 @@ vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) {
- return vlmul_ext_i16m1(op1);
+ return __riscv_vlmul_ext_i16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2(
@@ -499,7 +499,7 @@ vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) {
- return vlmul_ext_i16m2(op1);
+ return __riscv_vlmul_ext_i16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4(
@@ -508,7 +508,7 @@ vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) {
- return vlmul_ext_i16m4(op1);
+ return __riscv_vlmul_ext_i16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8(
@@ -517,7 +517,7 @@ vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) {
- return vlmul_ext_i16m8(op1);
+ return __riscv_vlmul_ext_i16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1(
@@ -526,7 +526,7 @@ vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) {
- return vlmul_ext_i16m1(op1);
+ return __riscv_vlmul_ext_i16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2(
@@ -535,7 +535,7 @@ vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) {
- return vlmul_ext_i16m2(op1);
+ return __riscv_vlmul_ext_i16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4(
@@ -544,7 +544,7 @@ vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) {
- return vlmul_ext_i16m4(op1);
+ return __riscv_vlmul_ext_i16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8(
@@ -553,7 +553,7 @@ vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) {
- return vlmul_ext_i16m8(op1);
+ return __riscv_vlmul_ext_i16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2(
@@ -562,7 +562,7 @@ vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) {
- return vlmul_ext_i16m2(op1);
+ return __riscv_vlmul_ext_i16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4(
@@ -571,7 +571,7 @@ vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) {
- return vlmul_ext_i16m4(op1);
+ return __riscv_vlmul_ext_i16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8(
@@ -580,7 +580,7 @@ vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) {
- return vlmul_ext_i16m8(op1);
+ return __riscv_vlmul_ext_i16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4(
@@ -589,7 +589,7 @@ vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) {
- return vlmul_ext_i16m4(op1);
+ return __riscv_vlmul_ext_i16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8(
@@ -598,7 +598,7 @@ vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) {
- return vlmul_ext_i16m8(op1);
+ return __riscv_vlmul_ext_i16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8(
@@ -607,7 +607,7 @@ vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) {
- return vlmul_ext_i16m8(op1);
+ return __riscv_vlmul_ext_i16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1(
@@ -616,7 +616,7 @@ vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) {
- return vlmul_ext_i32m1(op1);
+ return __riscv_vlmul_ext_i32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2(
@@ -625,7 +625,7 @@ vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) {
- return vlmul_ext_i32m2(op1);
+ return __riscv_vlmul_ext_i32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4(
@@ -634,7 +634,7 @@ vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) {
- return vlmul_ext_i32m4(op1);
+ return __riscv_vlmul_ext_i32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8(
@@ -643,7 +643,7 @@ vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) {
- return vlmul_ext_i32m8(op1);
+ return __riscv_vlmul_ext_i32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2(
@@ -652,7 +652,7 @@ vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) {
- return vlmul_ext_i32m2(op1);
+ return __riscv_vlmul_ext_i32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4(
@@ -661,7 +661,7 @@ vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) {
- return vlmul_ext_i32m4(op1);
+ return __riscv_vlmul_ext_i32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8(
@@ -670,7 +670,7 @@ vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) {
- return vlmul_ext_i32m8(op1);
+ return __riscv_vlmul_ext_i32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4(
@@ -679,7 +679,7 @@ vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) {
- return vlmul_ext_i32m4(op1);
+ return __riscv_vlmul_ext_i32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8(
@@ -688,7 +688,7 @@ vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) {
- return vlmul_ext_i32m8(op1);
+ return __riscv_vlmul_ext_i32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8(
@@ -697,7 +697,7 @@ vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) {
- return vlmul_ext_i32m8(op1);
+ return __riscv_vlmul_ext_i32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2(
@@ -706,7 +706,7 @@ vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) {
- return vlmul_ext_i64m2(op1);
+ return __riscv_vlmul_ext_i64m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4(
@@ -715,7 +715,7 @@ vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) {
- return vlmul_ext_i64m4(op1);
+ return __riscv_vlmul_ext_i64m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8(
@@ -724,7 +724,7 @@ vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) {
- return vlmul_ext_i64m8(op1);
+ return __riscv_vlmul_ext_i64m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4(
@@ -733,7 +733,7 @@ vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) {
- return vlmul_ext_i64m4(op1);
+ return __riscv_vlmul_ext_i64m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8(
@@ -742,7 +742,7 @@ vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) {
- return vlmul_ext_i64m8(op1);
+ return __riscv_vlmul_ext_i64m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8(
@@ -751,7 +751,7 @@ vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) {
- return vlmul_ext_i64m8(op1);
+ return __riscv_vlmul_ext_i64m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4(
@@ -760,7 +760,7 @@ vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) {
- return vlmul_ext_u8mf4(op1);
+ return __riscv_vlmul_ext_u8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2(
@@ -769,7 +769,7 @@ vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) {
- return vlmul_ext_u8mf2(op1);
+ return __riscv_vlmul_ext_u8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1(
@@ -778,7 +778,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
- return vlmul_ext_u8m1(op1);
+ return __riscv_vlmul_ext_u8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2(
@@ -787,7 +787,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
- return vlmul_ext_u8m2(op1);
+ return __riscv_vlmul_ext_u8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4(
@@ -796,7 +796,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
- return vlmul_ext_u8m4(op1);
+ return __riscv_vlmul_ext_u8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8(
@@ -805,7 +805,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
- return vlmul_ext_u8m8(op1);
+ return __riscv_vlmul_ext_u8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2(
@@ -814,7 +814,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
- return vlmul_ext_u8mf2(op1);
+ return __riscv_vlmul_ext_u8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1(
@@ -823,7 +823,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
- return vlmul_ext_u8m1(op1);
+ return __riscv_vlmul_ext_u8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2(
@@ -832,7 +832,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
- return vlmul_ext_u8m2(op1);
+ return __riscv_vlmul_ext_u8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4(
@@ -841,7 +841,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
- return vlmul_ext_u8m4(op1);
+ return __riscv_vlmul_ext_u8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8(
@@ -850,7 +850,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
- return vlmul_ext_u8m8(op1);
+ return __riscv_vlmul_ext_u8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1(
@@ -859,7 +859,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
- return vlmul_ext_u8m1(op1);
+ return __riscv_vlmul_ext_u8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2(
@@ -868,7 +868,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
- return vlmul_ext_u8m2(op1);
+ return __riscv_vlmul_ext_u8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4(
@@ -877,7 +877,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
- return vlmul_ext_u8m4(op1);
+ return __riscv_vlmul_ext_u8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8(
@@ -886,7 +886,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
- return vlmul_ext_u8m8(op1);
+ return __riscv_vlmul_ext_u8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2(
@@ -895,7 +895,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
- return vlmul_ext_u8m2(op1);
+ return __riscv_vlmul_ext_u8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4(
@@ -904,7 +904,7 @@ vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
- return vlmul_ext_u8m4(op1);
+ return __riscv_vlmul_ext_u8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8(
@@ -913,7 +913,7 @@ vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
- return vlmul_ext_u8m8(op1);
+ return __riscv_vlmul_ext_u8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4(
@@ -922,7 +922,7 @@ vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
- return vlmul_ext_u8m4(op1);
+ return __riscv_vlmul_ext_u8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8(
@@ -931,7 +931,7 @@ vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
- return vlmul_ext_u8m8(op1);
+ return __riscv_vlmul_ext_u8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8(
@@ -940,7 +940,7 @@ vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
- return vlmul_ext_u8m8(op1);
+ return __riscv_vlmul_ext_u8m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2(
@@ -949,7 +949,7 @@ vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
- return vlmul_ext_u16mf2(op1);
+ return __riscv_vlmul_ext_u16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
- return vlmul_ext_u16m1(op1);
+ return __riscv_vlmul_ext_u16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2(
@@ -967,7 +967,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
- return vlmul_ext_u16m2(op1);
+ return __riscv_vlmul_ext_u16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4(
@@ -976,7 +976,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
- return vlmul_ext_u16m4(op1);
+ return __riscv_vlmul_ext_u16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8(
@@ -985,7 +985,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
- return vlmul_ext_u16m8(op1);
+ return __riscv_vlmul_ext_u16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1(
@@ -994,7 +994,7 @@ vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
- return vlmul_ext_u16m1(op1);
+ return __riscv_vlmul_ext_u16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2(
@@ -1003,7 +1003,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
- return vlmul_ext_u16m2(op1);
+ return __riscv_vlmul_ext_u16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4(
@@ -1012,7 +1012,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
- return vlmul_ext_u16m4(op1);
+ return __riscv_vlmul_ext_u16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8(
@@ -1021,7 +1021,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
- return vlmul_ext_u16m8(op1);
+ return __riscv_vlmul_ext_u16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2(
@@ -1030,7 +1030,7 @@ vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
- return vlmul_ext_u16m2(op1);
+ return __riscv_vlmul_ext_u16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4(
@@ -1039,7 +1039,7 @@ vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
- return vlmul_ext_u16m4(op1);
+ return __riscv_vlmul_ext_u16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8(
@@ -1048,7 +1048,7 @@ vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
- return vlmul_ext_u16m8(op1);
+ return __riscv_vlmul_ext_u16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4(
@@ -1057,7 +1057,7 @@ vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
- return vlmul_ext_u16m4(op1);
+ return __riscv_vlmul_ext_u16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8(
@@ -1066,7 +1066,7 @@ vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
- return vlmul_ext_u16m8(op1);
+ return __riscv_vlmul_ext_u16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8(
@@ -1075,7 +1075,7 @@ vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
- return vlmul_ext_u16m8(op1);
+ return __riscv_vlmul_ext_u16m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1(
@@ -1084,7 +1084,7 @@ vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
- return vlmul_ext_u32m1(op1);
+ return __riscv_vlmul_ext_u32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2(
@@ -1093,7 +1093,7 @@ vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
- return vlmul_ext_u32m2(op1);
+ return __riscv_vlmul_ext_u32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4(
@@ -1102,7 +1102,7 @@ vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
- return vlmul_ext_u32m4(op1);
+ return __riscv_vlmul_ext_u32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8(
@@ -1111,7 +1111,7 @@ vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
- return vlmul_ext_u32m8(op1);
+ return __riscv_vlmul_ext_u32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2(
@@ -1120,7 +1120,7 @@ vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
- return vlmul_ext_u32m2(op1);
+ return __riscv_vlmul_ext_u32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4(
@@ -1129,7 +1129,7 @@ vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
- return vlmul_ext_u32m4(op1);
+ return __riscv_vlmul_ext_u32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8(
@@ -1138,7 +1138,7 @@ vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
- return vlmul_ext_u32m8(op1);
+ return __riscv_vlmul_ext_u32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4(
@@ -1147,7 +1147,7 @@ vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
- return vlmul_ext_u32m4(op1);
+ return __riscv_vlmul_ext_u32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8(
@@ -1156,7 +1156,7 @@ vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
- return vlmul_ext_u32m8(op1);
+ return __riscv_vlmul_ext_u32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8(
@@ -1165,7 +1165,7 @@ vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
- return vlmul_ext_u32m8(op1);
+ return __riscv_vlmul_ext_u32m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2(
@@ -1174,7 +1174,7 @@ vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
- return vlmul_ext_u64m2(op1);
+ return __riscv_vlmul_ext_u64m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4(
@@ -1183,7 +1183,7 @@ vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
- return vlmul_ext_u64m4(op1);
+ return __riscv_vlmul_ext_u64m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8(
@@ -1192,7 +1192,7 @@ vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
- return vlmul_ext_u64m8(op1);
+ return __riscv_vlmul_ext_u64m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4(
@@ -1201,7 +1201,7 @@ vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
- return vlmul_ext_u64m4(op1);
+ return __riscv_vlmul_ext_u64m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8(
@@ -1210,7 +1210,7 @@ vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
- return vlmul_ext_u64m8(op1);
+ return __riscv_vlmul_ext_u64m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8(
@@ -1219,7 +1219,7 @@ vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
- return vlmul_ext_u64m8(op1);
+ return __riscv_vlmul_ext_u64m8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4(
@@ -1228,7 +1228,7 @@ vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) {
- return vlmul_trunc_f16mf4(op1);
+ return __riscv_vlmul_trunc_f16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4(
@@ -1237,7 +1237,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) {
- return vlmul_trunc_f16mf4(op1);
+ return __riscv_vlmul_trunc_f16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2(
@@ -1246,7 +1246,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) {
- return vlmul_trunc_f16mf2(op1);
+ return __riscv_vlmul_trunc_f16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4(
@@ -1255,7 +1255,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) {
- return vlmul_trunc_f16mf4(op1);
+ return __riscv_vlmul_trunc_f16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2(
@@ -1264,7 +1264,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) {
- return vlmul_trunc_f16mf2(op1);
+ return __riscv_vlmul_trunc_f16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1(
@@ -1273,7 +1273,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) {
- return vlmul_trunc_f16m1(op1);
+ return __riscv_vlmul_trunc_f16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4(
@@ -1282,7 +1282,7 @@ vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) {
- return vlmul_trunc_f16mf4(op1);
+ return __riscv_vlmul_trunc_f16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2(
@@ -1291,7 +1291,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) {
- return vlmul_trunc_f16mf2(op1);
+ return __riscv_vlmul_trunc_f16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1(
@@ -1300,7 +1300,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) {
- return vlmul_trunc_f16m1(op1);
+ return __riscv_vlmul_trunc_f16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2(
@@ -1309,7 +1309,7 @@ vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) {
- return vlmul_trunc_f16m2(op1);
+ return __riscv_vlmul_trunc_f16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4(
@@ -1318,7 +1318,7 @@ vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) {
- return vlmul_trunc_f16mf4(op1);
+ return __riscv_vlmul_trunc_f16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2(
@@ -1327,7 +1327,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) {
- return vlmul_trunc_f16mf2(op1);
+ return __riscv_vlmul_trunc_f16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1(
@@ -1336,7 +1336,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) {
- return vlmul_trunc_f16m1(op1);
+ return __riscv_vlmul_trunc_f16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2(
@@ -1345,7 +1345,7 @@ vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) {
- return vlmul_trunc_f16m2(op1);
+ return __riscv_vlmul_trunc_f16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4(
@@ -1354,7 +1354,7 @@ vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) {
- return vlmul_trunc_f16m4(op1);
+ return __riscv_vlmul_trunc_f16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2(
@@ -1363,7 +1363,7 @@ vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) {
- return vlmul_trunc_f32mf2(op1);
+ return __riscv_vlmul_trunc_f32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2(
@@ -1372,7 +1372,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) {
- return vlmul_trunc_f32mf2(op1);
+ return __riscv_vlmul_trunc_f32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1(
@@ -1381,7 +1381,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) {
- return vlmul_trunc_f32m1(op1);
+ return __riscv_vlmul_trunc_f32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2(
@@ -1390,7 +1390,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) {
- return vlmul_trunc_f32mf2(op1);
+ return __riscv_vlmul_trunc_f32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1(
@@ -1399,7 +1399,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) {
- return vlmul_trunc_f32m1(op1);
+ return __riscv_vlmul_trunc_f32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2(
@@ -1408,7 +1408,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) {
- return vlmul_trunc_f32m2(op1);
+ return __riscv_vlmul_trunc_f32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2(
@@ -1417,7 +1417,7 @@ vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) {
- return vlmul_trunc_f32mf2(op1);
+ return __riscv_vlmul_trunc_f32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1(
@@ -1426,7 +1426,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) {
- return vlmul_trunc_f32m1(op1);
+ return __riscv_vlmul_trunc_f32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2(
@@ -1435,7 +1435,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) {
- return vlmul_trunc_f32m2(op1);
+ return __riscv_vlmul_trunc_f32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4(
@@ -1444,7 +1444,7 @@ vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) {
- return vlmul_trunc_f32m4(op1);
+ return __riscv_vlmul_trunc_f32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1(
@@ -1453,7 +1453,7 @@ vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) {
- return vlmul_trunc_f64m1(op1);
+ return __riscv_vlmul_trunc_f64m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1(
@@ -1462,7 +1462,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) {
- return vlmul_trunc_f64m1(op1);
+ return __riscv_vlmul_trunc_f64m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2(
@@ -1471,7 +1471,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) {
- return vlmul_trunc_f64m2(op1);
+ return __riscv_vlmul_trunc_f64m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1(
@@ -1480,7 +1480,7 @@ vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) {
- return vlmul_trunc_f64m1(op1);
+ return __riscv_vlmul_trunc_f64m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2(
@@ -1489,7 +1489,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) {
- return vlmul_trunc_f64m2(op1);
+ return __riscv_vlmul_trunc_f64m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4(
@@ -1498,7 +1498,7 @@ vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) {
- return vlmul_trunc_f64m4(op1);
+ return __riscv_vlmul_trunc_f64m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8(
@@ -1507,7 +1507,7 @@ vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
- return vlmul_trunc_i8mf8(op1);
+ return __riscv_vlmul_trunc_i8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8(
@@ -1516,7 +1516,7 @@ vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
- return vlmul_trunc_i8mf8(op1);
+ return __riscv_vlmul_trunc_i8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4(
@@ -1525,7 +1525,7 @@ vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
- return vlmul_trunc_i8mf4(op1);
+ return __riscv_vlmul_trunc_i8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8(
@@ -1534,7 +1534,7 @@ vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
- return vlmul_trunc_i8mf8(op1);
+ return __riscv_vlmul_trunc_i8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4(
@@ -1543,7 +1543,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
- return vlmul_trunc_i8mf4(op1);
+ return __riscv_vlmul_trunc_i8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2(
@@ -1552,7 +1552,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
- return vlmul_trunc_i8mf2(op1);
+ return __riscv_vlmul_trunc_i8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8(
@@ -1561,7 +1561,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
- return vlmul_trunc_i8mf8(op1);
+ return __riscv_vlmul_trunc_i8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4(
@@ -1570,7 +1570,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
- return vlmul_trunc_i8mf4(op1);
+ return __riscv_vlmul_trunc_i8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2(
@@ -1579,7 +1579,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
- return vlmul_trunc_i8mf2(op1);
+ return __riscv_vlmul_trunc_i8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1(
@@ -1588,7 +1588,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
- return vlmul_trunc_i8m1(op1);
+ return __riscv_vlmul_trunc_i8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8(
@@ -1597,7 +1597,7 @@ vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
- return vlmul_trunc_i8mf8(op1);
+ return __riscv_vlmul_trunc_i8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4(
@@ -1606,7 +1606,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
- return vlmul_trunc_i8mf4(op1);
+ return __riscv_vlmul_trunc_i8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2(
@@ -1615,7 +1615,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
- return vlmul_trunc_i8mf2(op1);
+ return __riscv_vlmul_trunc_i8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1(
@@ -1624,7 +1624,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
- return vlmul_trunc_i8m1(op1);
+ return __riscv_vlmul_trunc_i8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2(
@@ -1633,7 +1633,7 @@ vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
- return vlmul_trunc_i8m2(op1);
+ return __riscv_vlmul_trunc_i8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8(
@@ -1642,7 +1642,7 @@ vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
- return vlmul_trunc_i8mf8(op1);
+ return __riscv_vlmul_trunc_i8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4(
@@ -1651,7 +1651,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
- return vlmul_trunc_i8mf4(op1);
+ return __riscv_vlmul_trunc_i8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2(
@@ -1660,7 +1660,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
- return vlmul_trunc_i8mf2(op1);
+ return __riscv_vlmul_trunc_i8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1(
@@ -1669,7 +1669,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
- return vlmul_trunc_i8m1(op1);
+ return __riscv_vlmul_trunc_i8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2(
@@ -1678,7 +1678,7 @@ vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
- return vlmul_trunc_i8m2(op1);
+ return __riscv_vlmul_trunc_i8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4(
@@ -1687,7 +1687,7 @@ vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
- return vlmul_trunc_i8m4(op1);
+ return __riscv_vlmul_trunc_i8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4(
@@ -1696,7 +1696,7 @@ vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
- return vlmul_trunc_i16mf4(op1);
+ return __riscv_vlmul_trunc_i16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4(
@@ -1705,7 +1705,7 @@ vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
- return vlmul_trunc_i16mf4(op1);
+ return __riscv_vlmul_trunc_i16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2(
@@ -1714,7 +1714,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
- return vlmul_trunc_i16mf2(op1);
+ return __riscv_vlmul_trunc_i16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4(
@@ -1723,7 +1723,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
- return vlmul_trunc_i16mf4(op1);
+ return __riscv_vlmul_trunc_i16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2(
@@ -1732,7 +1732,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
- return vlmul_trunc_i16mf2(op1);
+ return __riscv_vlmul_trunc_i16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1(
@@ -1741,7 +1741,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
- return vlmul_trunc_i16m1(op1);
+ return __riscv_vlmul_trunc_i16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4(
@@ -1750,7 +1750,7 @@ vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
- return vlmul_trunc_i16mf4(op1);
+ return __riscv_vlmul_trunc_i16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2(
@@ -1759,7 +1759,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
- return vlmul_trunc_i16mf2(op1);
+ return __riscv_vlmul_trunc_i16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1(
@@ -1768,7 +1768,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
- return vlmul_trunc_i16m1(op1);
+ return __riscv_vlmul_trunc_i16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2(
@@ -1777,7 +1777,7 @@ vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
- return vlmul_trunc_i16m2(op1);
+ return __riscv_vlmul_trunc_i16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4(
@@ -1786,7 +1786,7 @@ vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
- return vlmul_trunc_i16mf4(op1);
+ return __riscv_vlmul_trunc_i16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2(
@@ -1795,7 +1795,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
- return vlmul_trunc_i16mf2(op1);
+ return __riscv_vlmul_trunc_i16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1(
@@ -1804,7 +1804,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
- return vlmul_trunc_i16m1(op1);
+ return __riscv_vlmul_trunc_i16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2(
@@ -1813,7 +1813,7 @@ vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
- return vlmul_trunc_i16m2(op1);
+ return __riscv_vlmul_trunc_i16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4(
@@ -1822,7 +1822,7 @@ vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
- return vlmul_trunc_i16m4(op1);
+ return __riscv_vlmul_trunc_i16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2(
@@ -1831,7 +1831,7 @@ vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
- return vlmul_trunc_i32mf2(op1);
+ return __riscv_vlmul_trunc_i32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2(
@@ -1840,7 +1840,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
- return vlmul_trunc_i32mf2(op1);
+ return __riscv_vlmul_trunc_i32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1(
@@ -1849,7 +1849,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
- return vlmul_trunc_i32m1(op1);
+ return __riscv_vlmul_trunc_i32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2(
@@ -1858,7 +1858,7 @@ vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
- return vlmul_trunc_i32mf2(op1);
+ return __riscv_vlmul_trunc_i32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1(
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
- return vlmul_trunc_i32m1(op1);
+ return __riscv_vlmul_trunc_i32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2(
@@ -1876,7 +1876,7 @@ vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
- return vlmul_trunc_i32m2(op1);
+ return __riscv_vlmul_trunc_i32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2(
@@ -1885,7 +1885,7 @@ vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
- return vlmul_trunc_i32mf2(op1);
+ return __riscv_vlmul_trunc_i32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1(
@@ -1894,7 +1894,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
- return vlmul_trunc_i32m1(op1);
+ return __riscv_vlmul_trunc_i32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2(
@@ -1903,7 +1903,7 @@ vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
- return vlmul_trunc_i32m2(op1);
+ return __riscv_vlmul_trunc_i32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4(
@@ -1912,7 +1912,7 @@ vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
- return vlmul_trunc_i32m4(op1);
+ return __riscv_vlmul_trunc_i32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1(
@@ -1921,7 +1921,7 @@ vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
- return vlmul_trunc_i64m1(op1);
+ return __riscv_vlmul_trunc_i64m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1(
@@ -1930,7 +1930,7 @@ vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
- return vlmul_trunc_i64m1(op1);
+ return __riscv_vlmul_trunc_i64m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2(
@@ -1939,7 +1939,7 @@ vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
- return vlmul_trunc_i64m2(op1);
+ return __riscv_vlmul_trunc_i64m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1(
@@ -1948,7 +1948,7 @@ vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
- return vlmul_trunc_i64m1(op1);
+ return __riscv_vlmul_trunc_i64m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2(
@@ -1957,7 +1957,7 @@ vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
- return vlmul_trunc_i64m2(op1);
+ return __riscv_vlmul_trunc_i64m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4(
@@ -1966,7 +1966,7 @@ vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
- return vlmul_trunc_i64m4(op1);
+ return __riscv_vlmul_trunc_i64m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8(
@@ -1975,7 +1975,7 @@ vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
- return vlmul_trunc_u8mf8(op1);
+ return __riscv_vlmul_trunc_u8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8(
@@ -1984,7 +1984,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
- return vlmul_trunc_u8mf8(op1);
+ return __riscv_vlmul_trunc_u8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4(
@@ -1993,7 +1993,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
- return vlmul_trunc_u8mf4(op1);
+ return __riscv_vlmul_trunc_u8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8(
@@ -2002,7 +2002,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
- return vlmul_trunc_u8mf8(op1);
+ return __riscv_vlmul_trunc_u8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4(
@@ -2011,7 +2011,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
- return vlmul_trunc_u8mf4(op1);
+ return __riscv_vlmul_trunc_u8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2(
@@ -2020,7 +2020,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
- return vlmul_trunc_u8mf2(op1);
+ return __riscv_vlmul_trunc_u8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8(
@@ -2029,7 +2029,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
- return vlmul_trunc_u8mf8(op1);
+ return __riscv_vlmul_trunc_u8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4(
@@ -2038,7 +2038,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
- return vlmul_trunc_u8mf4(op1);
+ return __riscv_vlmul_trunc_u8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2(
@@ -2047,7 +2047,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
- return vlmul_trunc_u8mf2(op1);
+ return __riscv_vlmul_trunc_u8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1(
@@ -2056,7 +2056,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
- return vlmul_trunc_u8m1(op1);
+ return __riscv_vlmul_trunc_u8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8(
@@ -2065,7 +2065,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
- return vlmul_trunc_u8mf8(op1);
+ return __riscv_vlmul_trunc_u8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4(
@@ -2074,7 +2074,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
- return vlmul_trunc_u8mf4(op1);
+ return __riscv_vlmul_trunc_u8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2(
@@ -2083,7 +2083,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
- return vlmul_trunc_u8mf2(op1);
+ return __riscv_vlmul_trunc_u8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1(
@@ -2092,7 +2092,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
- return vlmul_trunc_u8m1(op1);
+ return __riscv_vlmul_trunc_u8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2(
@@ -2101,7 +2101,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
- return vlmul_trunc_u8m2(op1);
+ return __riscv_vlmul_trunc_u8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8(
@@ -2110,7 +2110,7 @@ vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
- return vlmul_trunc_u8mf8(op1);
+ return __riscv_vlmul_trunc_u8mf8(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4(
@@ -2119,7 +2119,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
- return vlmul_trunc_u8mf4(op1);
+ return __riscv_vlmul_trunc_u8mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2(
@@ -2128,7 +2128,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
- return vlmul_trunc_u8mf2(op1);
+ return __riscv_vlmul_trunc_u8mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1(
@@ -2137,7 +2137,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
- return vlmul_trunc_u8m1(op1);
+ return __riscv_vlmul_trunc_u8m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2(
@@ -2146,7 +2146,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
- return vlmul_trunc_u8m2(op1);
+ return __riscv_vlmul_trunc_u8m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4(
@@ -2155,7 +2155,7 @@ vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
- return vlmul_trunc_u8m4(op1);
+ return __riscv_vlmul_trunc_u8m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4(
@@ -2164,7 +2164,7 @@ vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
- return vlmul_trunc_u16mf4(op1);
+ return __riscv_vlmul_trunc_u16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4(
@@ -2173,7 +2173,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
- return vlmul_trunc_u16mf4(op1);
+ return __riscv_vlmul_trunc_u16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2(
@@ -2182,7 +2182,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
- return vlmul_trunc_u16mf2(op1);
+ return __riscv_vlmul_trunc_u16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4(
@@ -2191,7 +2191,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
- return vlmul_trunc_u16mf4(op1);
+ return __riscv_vlmul_trunc_u16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2(
@@ -2200,7 +2200,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
- return vlmul_trunc_u16mf2(op1);
+ return __riscv_vlmul_trunc_u16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1(
@@ -2209,7 +2209,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
- return vlmul_trunc_u16m1(op1);
+ return __riscv_vlmul_trunc_u16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4(
@@ -2218,7 +2218,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
- return vlmul_trunc_u16mf4(op1);
+ return __riscv_vlmul_trunc_u16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2(
@@ -2227,7 +2227,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
- return vlmul_trunc_u16mf2(op1);
+ return __riscv_vlmul_trunc_u16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1(
@@ -2236,7 +2236,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
- return vlmul_trunc_u16m1(op1);
+ return __riscv_vlmul_trunc_u16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2(
@@ -2245,7 +2245,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
- return vlmul_trunc_u16m2(op1);
+ return __riscv_vlmul_trunc_u16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4(
@@ -2254,7 +2254,7 @@ vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
- return vlmul_trunc_u16mf4(op1);
+ return __riscv_vlmul_trunc_u16mf4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2(
@@ -2263,7 +2263,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
- return vlmul_trunc_u16mf2(op1);
+ return __riscv_vlmul_trunc_u16mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1(
@@ -2272,7 +2272,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
- return vlmul_trunc_u16m1(op1);
+ return __riscv_vlmul_trunc_u16m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2(
@@ -2281,7 +2281,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
- return vlmul_trunc_u16m2(op1);
+ return __riscv_vlmul_trunc_u16m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4(
@@ -2290,7 +2290,7 @@ vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
- return vlmul_trunc_u16m4(op1);
+ return __riscv_vlmul_trunc_u16m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2(
@@ -2299,7 +2299,7 @@ vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
- return vlmul_trunc_u32mf2(op1);
+ return __riscv_vlmul_trunc_u32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2(
@@ -2308,7 +2308,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
- return vlmul_trunc_u32mf2(op1);
+ return __riscv_vlmul_trunc_u32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1(
@@ -2317,7 +2317,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
- return vlmul_trunc_u32m1(op1);
+ return __riscv_vlmul_trunc_u32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2(
@@ -2326,7 +2326,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) {
- return vlmul_trunc_u32mf2(op1);
+ return __riscv_vlmul_trunc_u32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1(
@@ -2335,7 +2335,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) {
- return vlmul_trunc_u32m1(op1);
+ return __riscv_vlmul_trunc_u32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2(
@@ -2344,7 +2344,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) {
- return vlmul_trunc_u32m2(op1);
+ return __riscv_vlmul_trunc_u32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2(
@@ -2353,7 +2353,7 @@ vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) {
- return vlmul_trunc_u32mf2(op1);
+ return __riscv_vlmul_trunc_u32mf2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1(
@@ -2362,7 +2362,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) {
- return vlmul_trunc_u32m1(op1);
+ return __riscv_vlmul_trunc_u32m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2(
@@ -2371,7 +2371,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) {
- return vlmul_trunc_u32m2(op1);
+ return __riscv_vlmul_trunc_u32m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4(
@@ -2380,7 +2380,7 @@ vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) {
- return vlmul_trunc_u32m4(op1);
+ return __riscv_vlmul_trunc_u32m4(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1(
@@ -2389,7 +2389,7 @@ vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) {
- return vlmul_trunc_u64m1(op1);
+ return __riscv_vlmul_trunc_u64m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1(
@@ -2398,7 +2398,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) {
- return vlmul_trunc_u64m1(op1);
+ return __riscv_vlmul_trunc_u64m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2(
@@ -2407,7 +2407,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) {
- return vlmul_trunc_u64m2(op1);
+ return __riscv_vlmul_trunc_u64m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1(
@@ -2416,7 +2416,7 @@ vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) {
- return vlmul_trunc_u64m1(op1);
+ return __riscv_vlmul_trunc_u64m1(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2(
@@ -2425,7 +2425,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
- return vlmul_trunc_u64m2(op1);
+ return __riscv_vlmul_trunc_u64m2(op1);
}
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4(
@@ -2434,6 +2434,6 @@ vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) {
- return vlmul_trunc_u64m4(op1);
+ return __riscv_vlmul_trunc_u64m4(op1);
}
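For reference, a minimal call-site sketch of the rename applied by the vlmul_trunc hunks above — a hypothetical wrapper, assuming a toolchain whose riscv_vector.h ships the prefixed overloads (e.g. clang with -march=rv64gcv):

#include <riscv_vector.h>

// Hypothetical helper: truncate a register group from LMUL=2 down to LMUL=1.
// The overload is resolved from the operand type, exactly as in
// test_vlmul_trunc_v_i8m2_i8m1 above; only the spelling gains __riscv_.
vint8m1_t narrow_to_m1(vint8m2_t v) {
  return __riscv_vlmul_trunc_i8m1(v); // was: vlmul_trunc_i8m1(v)
}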
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei16.c
index 8fa36149658e..ecc6c3a0be81 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vloxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4(
@@ -157,7 +157,7 @@ vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2(
@@ -166,7 +166,7 @@ vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1(
@@ -175,7 +175,7 @@ vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2(
@@ -184,7 +184,7 @@ vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4(
@@ -193,7 +193,7 @@ vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4(
@@ -202,7 +202,7 @@ vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2(
@@ -211,7 +211,7 @@ vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1(
@@ -220,7 +220,7 @@ vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2(
@@ -229,7 +229,7 @@ vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4(
@@ -238,7 +238,7 @@ vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8(
@@ -247,7 +247,7 @@ vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2(
@@ -256,7 +256,7 @@ vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1(
@@ -265,7 +265,7 @@ vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2(
@@ -274,7 +274,7 @@ vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4(
@@ -283,7 +283,7 @@ vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8(
@@ -292,7 +292,7 @@ vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1(
@@ -301,7 +301,7 @@ vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2(
@@ -310,7 +310,7 @@ vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4(
@@ -319,7 +319,7 @@ vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8(
@@ -328,7 +328,7 @@ vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8(
@@ -337,7 +337,7 @@ vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4(
@@ -346,7 +346,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2(
@@ -355,7 +355,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1(
@@ -364,7 +364,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2(
@@ -373,7 +373,7 @@ vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4(
@@ -382,7 +382,7 @@ vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4(
@@ -391,7 +391,7 @@ vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2(
@@ -400,7 +400,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1(
@@ -409,7 +409,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2(
@@ -418,7 +418,7 @@ vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4(
@@ -427,7 +427,7 @@ vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8(
@@ -436,7 +436,7 @@ vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2(
@@ -445,7 +445,7 @@ vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1(
@@ -454,7 +454,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2(
@@ -463,7 +463,7 @@ vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4(
@@ -472,7 +472,7 @@ vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8(
@@ -481,7 +481,7 @@ vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1(
@@ -490,7 +490,7 @@ vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2(
@@ -499,7 +499,7 @@ vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4(
@@ -508,7 +508,7 @@ vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8(
@@ -517,7 +517,7 @@ vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(base, bindex, vl);
+ return __riscv_vloxei16(base, bindex, vl);
}
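// For reference, a minimal call-site sketch of the same rename for these
// indexed-load overloads -- a hypothetical wrapper, with the operand types
// taken from test_vloxei16_v_i32m1 above and the mask-first masked form
// used in the _m tests below:
#include <riscv_vector.h>
vint32m1_t gather_i32(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
  return __riscv_vloxei16(base, bindex, vl);       // was: vloxei16(base, bindex, vl)
}
vint32m1_t gather_i32_m(vbool32_t mask, const int32_t *base,
                        vuint16mf2_t bindex, size_t vl) {
  return __riscv_vloxei16(mask, base, bindex, vl); // masked overload, same name
}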
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m(
@@ -526,7 +526,7 @@ vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m(
@@ -535,7 +535,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m(
@@ -544,7 +544,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m(
@@ -553,7 +553,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m(
@@ -562,7 +562,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m(
@@ -571,7 +571,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m(
@@ -580,7 +580,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m(
@@ -589,7 +589,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m(
@@ -598,7 +598,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m(
@@ -607,7 +607,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m(
@@ -616,7 +616,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m(
@@ -625,7 +625,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m(
@@ -634,7 +634,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m(
@@ -643,7 +643,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m(
@@ -652,7 +652,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m(
@@ -661,7 +661,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m(
@@ -670,7 +670,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m(
@@ -679,7 +679,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m(
@@ -688,7 +688,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m(
@@ -697,7 +697,7 @@ vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m(
@@ -706,7 +706,7 @@ vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m(
@@ -715,7 +715,7 @@ vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m(
@@ -724,7 +724,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m(
@@ -733,7 +733,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m(
@@ -742,7 +742,7 @@ vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m(
@@ -751,7 +751,7 @@ vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m(
@@ -760,7 +760,7 @@ vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m(
@@ -769,7 +769,7 @@ vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m(
@@ -778,7 +778,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m(
@@ -787,7 +787,7 @@ vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m(
@@ -796,7 +796,7 @@ vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m(
@@ -805,7 +805,7 @@ vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m(
@@ -814,7 +814,7 @@ vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m(
@@ -823,7 +823,7 @@ vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m(
@@ -832,7 +832,7 @@ vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m(
@@ -841,7 +841,7 @@ vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m(
@@ -850,7 +850,7 @@ vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m(
@@ -859,7 +859,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m(
@@ -868,7 +868,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m(
@@ -877,7 +877,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m(
@@ -886,7 +886,7 @@ vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m(
@@ -895,7 +895,7 @@ vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m(
@@ -904,7 +904,7 @@ vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m(
@@ -913,7 +913,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m(
@@ -922,7 +922,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m(
@@ -931,7 +931,7 @@ vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m(
@@ -940,7 +940,7 @@ vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m(
@@ -949,7 +949,7 @@ vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m(
@@ -958,7 +958,7 @@ vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m(
@@ -967,7 +967,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m(
@@ -976,7 +976,7 @@ vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m(
@@ -985,7 +985,7 @@ vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m(
@@ -994,7 +994,7 @@ vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m(
@@ -1003,7 +1003,7 @@ vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m(
@@ -1012,7 +1012,7 @@ vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m(
@@ -1021,7 +1021,7 @@ vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m(
@@ -1030,6 +1030,6 @@ vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16(mask, base, bindex, vl);
+ return __riscv_vloxei16(mask, base, bindex, vl);
}
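
The vloxei16.c churn above is a pure spelling change to the overloaded entry points; the argument lists and the checked IR are untouched. As a minimal standalone sketch of the new spelling (not part of the patch; gather_f32 is an illustrative name, and an rv64gcv toolchain plus <riscv_vector.h> are assumed):

#include <riscv_vector.h>

// Masked overloaded indexed-ordered load: gathers 32-bit floats from
// base + bindex[i] byte offsets. The concrete intrinsic is resolved from
// the argument types, just as in the autogenerated tests above.
vfloat32m1_t gather_f32(vbool32_t mask, const float *base,
                        vuint16mf2_t bindex, size_t vl) {
  return __riscv_vloxei16(mask, base, bindex, vl);
}

Since the tests are rewritten wholesale rather than extended, the unprefixed vloxei16 spelling appears to be replaced, not aliased.
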
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei32.c
index 24d97ca92c3c..aeda8f14ef23 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1(
@@ -67,7 +67,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4(
@@ -85,7 +85,7 @@ vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8(
@@ -94,7 +94,7 @@ vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4(
@@ -121,7 +121,7 @@ vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8(
@@ -130,7 +130,7 @@ vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8(
@@ -139,7 +139,7 @@ vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4(
@@ -148,7 +148,7 @@ vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2(
@@ -157,7 +157,7 @@ vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1(
@@ -166,7 +166,7 @@ vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2(
@@ -175,7 +175,7 @@ vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4(
@@ -184,7 +184,7 @@ vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2(
@@ -193,7 +193,7 @@ vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1(
@@ -202,7 +202,7 @@ vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2(
@@ -211,7 +211,7 @@ vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4(
@@ -220,7 +220,7 @@ vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2(
@@ -229,7 +229,7 @@ vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1(
@@ -238,7 +238,7 @@ vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2(
@@ -247,7 +247,7 @@ vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4(
@@ -256,7 +256,7 @@ vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8(
@@ -265,7 +265,7 @@ vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1(
@@ -274,7 +274,7 @@ vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2(
@@ -283,7 +283,7 @@ vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4(
@@ -292,7 +292,7 @@ vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8(
@@ -301,7 +301,7 @@ vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8(
@@ -310,7 +310,7 @@ vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4(
@@ -319,7 +319,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2(
@@ -328,7 +328,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1(
@@ -337,7 +337,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2(
@@ -346,7 +346,7 @@ vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4(
@@ -355,7 +355,7 @@ vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2(
@@ -364,7 +364,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1(
@@ -373,7 +373,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2(
@@ -382,7 +382,7 @@ vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4(
@@ -391,7 +391,7 @@ vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2(
@@ -400,7 +400,7 @@ vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1(
@@ -409,7 +409,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2(
@@ -418,7 +418,7 @@ vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4(
@@ -427,7 +427,7 @@ vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8(
@@ -436,7 +436,7 @@ vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1(
@@ -445,7 +445,7 @@ vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2(
@@ -454,7 +454,7 @@ vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4(
@@ -463,7 +463,7 @@ vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8(
@@ -472,7 +472,7 @@ vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(base, bindex, vl);
+ return __riscv_vloxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m(
@@ -481,7 +481,7 @@ vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m(
@@ -490,7 +490,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m(
@@ -499,7 +499,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m(
@@ -508,7 +508,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m(
@@ -517,7 +517,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m(
@@ -526,7 +526,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m(
@@ -535,7 +535,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m(
@@ -544,7 +544,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m(
@@ -553,7 +553,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m(
@@ -562,7 +562,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m(
@@ -571,7 +571,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m(
@@ -580,7 +580,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m(
@@ -589,7 +589,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m(
@@ -598,7 +598,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m(
@@ -607,7 +607,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m(
@@ -616,7 +616,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m(
@@ -625,7 +625,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m(
@@ -634,7 +634,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m(
@@ -643,7 +643,7 @@ vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m(
@@ -652,7 +652,7 @@ vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m(
@@ -661,7 +661,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m(
@@ -670,7 +670,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m(
@@ -679,7 +679,7 @@ vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m(
@@ -688,7 +688,7 @@ vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m(
@@ -697,7 +697,7 @@ vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m(
@@ -706,7 +706,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m(
@@ -715,7 +715,7 @@ vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m(
@@ -724,7 +724,7 @@ vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m(
@@ -733,7 +733,7 @@ vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m(
@@ -742,7 +742,7 @@ vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m(
@@ -751,7 +751,7 @@ vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m(
@@ -760,7 +760,7 @@ vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m(
@@ -769,7 +769,7 @@ vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m(
@@ -778,7 +778,7 @@ vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m(
@@ -787,7 +787,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m(
@@ -796,7 +796,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m(
@@ -805,7 +805,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m(
@@ -814,7 +814,7 @@ vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m(
@@ -823,7 +823,7 @@ vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m(
@@ -832,7 +832,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m(
@@ -841,7 +841,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m(
@@ -850,7 +850,7 @@ vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m(
@@ -859,7 +859,7 @@ vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m(
@@ -868,7 +868,7 @@ vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m(
@@ -877,7 +877,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m(
@@ -886,7 +886,7 @@ vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m(
@@ -895,7 +895,7 @@ vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m(
@@ -904,7 +904,7 @@ vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m(
@@ -913,7 +913,7 @@ vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m(
@@ -922,7 +922,7 @@ vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m(
@@ -931,7 +931,7 @@ vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m(
@@ -940,6 +940,6 @@ vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32(mask, base, bindex, vl);
+ return __riscv_vloxei32(mask, base, bindex, vl);
}
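
The same rename applies to the unmasked vloxei32 overloads above. A hedged sketch of how overload resolution picks the concrete intrinsic (gather_i64 is an illustrative name, not from the patch): an EEW=32, LMUL=1 index vector paired with 64-bit elements resolves to vloxei32_v_i64m2, matching test_vloxei32_v_i64m2 earlier in this file.

#include <riscv_vector.h>

// Unmasked overloaded form: vuint32m1_t holds VLEN/32 indices, so the
// 64-bit result needs LMUL=2 (vint64m2_t) to keep element counts equal.
vint64m2_t gather_i64(const int64_t *base, vuint32m1_t bindex, size_t vl) {
  return __riscv_vloxei32(base, bindex, vl);
}
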
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei64.c
index 47e384f91df6..c18637b77e7e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1(
@@ -58,7 +58,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4(
@@ -76,7 +76,7 @@ vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2(
@@ -94,7 +94,7 @@ vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4(
@@ -103,7 +103,7 @@ vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8(
@@ -112,7 +112,7 @@ vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8(
@@ -121,7 +121,7 @@ vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4(
@@ -130,7 +130,7 @@ vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2(
@@ -139,7 +139,7 @@ vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1(
@@ -148,7 +148,7 @@ vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4(
@@ -157,7 +157,7 @@ vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2(
@@ -166,7 +166,7 @@ vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1(
@@ -175,7 +175,7 @@ vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2(
@@ -184,7 +184,7 @@ vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2(
@@ -193,7 +193,7 @@ vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1(
@@ -202,7 +202,7 @@ vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2(
@@ -211,7 +211,7 @@ vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4(
@@ -220,7 +220,7 @@ vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1(
@@ -229,7 +229,7 @@ vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2(
@@ -238,7 +238,7 @@ vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4(
@@ -247,7 +247,7 @@ vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8(
@@ -256,7 +256,7 @@ vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8(
@@ -265,7 +265,7 @@ vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4(
@@ -274,7 +274,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2(
@@ -283,7 +283,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1(
@@ -292,7 +292,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4(
@@ -301,7 +301,7 @@ vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2(
@@ -310,7 +310,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1(
@@ -319,7 +319,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2(
@@ -328,7 +328,7 @@ vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2(
@@ -337,7 +337,7 @@ vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1(
@@ -346,7 +346,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2(
@@ -355,7 +355,7 @@ vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4(
@@ -364,7 +364,7 @@ vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1(
@@ -373,7 +373,7 @@ vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2(
@@ -382,7 +382,7 @@ vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4(
@@ -391,7 +391,7 @@ vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8(
@@ -400,7 +400,7 @@ vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(base, bindex, vl);
+ return __riscv_vloxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m(
@@ -409,7 +409,7 @@ vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_m(
@@ -418,7 +418,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m(
@@ -427,7 +427,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m(
@@ -436,7 +436,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m(
@@ -445,7 +445,7 @@ vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m(
@@ -454,7 +454,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m(
@@ -463,7 +463,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m(
@@ -472,7 +472,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m(
@@ -499,7 +499,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m(
@@ -508,7 +508,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m(
@@ -517,7 +517,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m(
@@ -526,7 +526,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m(
@@ -535,7 +535,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m(
@@ -544,7 +544,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m(
@@ -553,7 +553,7 @@ vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m(
@@ -562,7 +562,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m(
@@ -571,7 +571,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m(
@@ -580,7 +580,7 @@ vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m(
@@ -589,7 +589,7 @@ vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m(
@@ -598,7 +598,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m(
@@ -607,7 +607,7 @@ vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m(
@@ -616,7 +616,7 @@ vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m(
@@ -625,7 +625,7 @@ vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m(
@@ -634,7 +634,7 @@ vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m(
@@ -643,7 +643,7 @@ vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m(
@@ -652,7 +652,7 @@ vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m(
@@ -661,7 +661,7 @@ vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m(
@@ -670,7 +670,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m(
@@ -679,7 +679,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m(
@@ -688,7 +688,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m(
@@ -697,7 +697,7 @@ vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m(
@@ -706,7 +706,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m(
@@ -715,7 +715,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m(
@@ -724,7 +724,7 @@ vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m(
@@ -733,7 +733,7 @@ vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m(
@@ -742,7 +742,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m(
@@ -751,7 +751,7 @@ vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m(
@@ -760,7 +760,7 @@ vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m(
@@ -769,7 +769,7 @@ vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m(
@@ -778,7 +778,7 @@ vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m(
@@ -787,7 +787,7 @@ vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m(
@@ -796,6 +796,6 @@ vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64(mask, base, bindex, vl);
+ return __riscv_vloxei64(mask, base, bindex, vl);
}
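For contrast with the overloaded calls these tests exercise, the explicitly typed spelling remains available under the same prefix (renamed earlier in this patch series); a hypothetical side-by-side sketch for the unmasked 64-bit-indexed case:

#include <riscv_vector.h>

// Unmasked indexed load of doubles through 64-bit byte offsets,
// written with both the overloaded and the explicitly typed names.
vfloat64m1_t gather(const double *base, vuint64m1_t byte_offsets, size_t vl) {
  vfloat64m1_t a = __riscv_vloxei64(base, byte_offsets, vl);         // overloaded
  vfloat64m1_t b = __riscv_vloxei64_v_f64m1(base, byte_offsets, vl); // explicit
  return __riscv_vfadd(a, b, vl); // combine so both results are used
}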
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei8.c
index 8414385cc3b4..bd7e7fd37a98 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vloxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4(
@@ -157,7 +157,7 @@ vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2(
@@ -166,7 +166,7 @@ vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1(
@@ -175,7 +175,7 @@ vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2(
@@ -184,7 +184,7 @@ vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4(
@@ -193,7 +193,7 @@ vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8(
@@ -202,7 +202,7 @@ vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4(
@@ -211,7 +211,7 @@ vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2(
@@ -220,7 +220,7 @@ vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1(
@@ -229,7 +229,7 @@ vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2(
@@ -238,7 +238,7 @@ vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4(
@@ -247,7 +247,7 @@ vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8(
@@ -256,7 +256,7 @@ vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2(
@@ -265,7 +265,7 @@ vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1(
@@ -274,7 +274,7 @@ vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4(
@@ -292,7 +292,7 @@ vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8(
@@ -301,7 +301,7 @@ vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1(
@@ -310,7 +310,7 @@ vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2(
@@ -319,7 +319,7 @@ vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4(
@@ -328,7 +328,7 @@ vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8(
@@ -337,7 +337,7 @@ vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8(
@@ -346,7 +346,7 @@ vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2(
@@ -382,7 +382,7 @@ vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4(
@@ -391,7 +391,7 @@ vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8(
@@ -400,7 +400,7 @@ vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4(
@@ -409,7 +409,7 @@ vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2(
@@ -436,7 +436,7 @@ vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4(
@@ -445,7 +445,7 @@ vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8(
@@ -454,7 +454,7 @@ vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2(
@@ -463,7 +463,7 @@ vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2(
@@ -481,7 +481,7 @@ vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4(
@@ -490,7 +490,7 @@ vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8(
@@ -499,7 +499,7 @@ vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1(
@@ -508,7 +508,7 @@ vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2(
@@ -517,7 +517,7 @@ vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4(
@@ -526,7 +526,7 @@ vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8(
@@ -535,7 +535,7 @@ vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(base, bindex, vl);
+ return __riscv_vloxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m(
@@ -544,7 +544,7 @@ vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m(
@@ -553,7 +553,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m(
@@ -562,7 +562,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m(
@@ -571,7 +571,7 @@ vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m(
@@ -580,7 +580,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_m(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m(
@@ -598,7 +598,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m(
@@ -607,7 +607,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m(
@@ -616,7 +616,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m(
@@ -625,7 +625,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m(
@@ -634,7 +634,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m(
@@ -643,7 +643,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m(
@@ -652,7 +652,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m(
@@ -661,7 +661,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m(
@@ -670,7 +670,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m(
@@ -679,7 +679,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m(
@@ -688,7 +688,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m(
@@ -697,7 +697,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m(
@@ -706,7 +706,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m(
@@ -715,7 +715,7 @@ vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m(
@@ -724,7 +724,7 @@ vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m(
@@ -733,7 +733,7 @@ vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bi
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m(
@@ -742,7 +742,7 @@ vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m(
@@ -751,7 +751,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m(
@@ -760,7 +760,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m(
@@ -769,7 +769,7 @@ vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m(
@@ -778,7 +778,7 @@ vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m(
@@ -787,7 +787,7 @@ vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m(
@@ -796,7 +796,7 @@ vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m(
@@ -805,7 +805,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m(
@@ -814,7 +814,7 @@ vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m(
@@ -823,7 +823,7 @@ vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m(
@@ -832,7 +832,7 @@ vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m(
@@ -841,7 +841,7 @@ vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m(
@@ -850,7 +850,7 @@ vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m(
@@ -859,7 +859,7 @@ vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m(
@@ -868,7 +868,7 @@ vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m(
@@ -877,7 +877,7 @@ vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m(
@@ -886,7 +886,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m(
@@ -895,7 +895,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m(
@@ -904,7 +904,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m(
@@ -913,7 +913,7 @@ vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m(
@@ -922,7 +922,7 @@ vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m(
@@ -931,7 +931,7 @@ vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m(
@@ -940,7 +940,7 @@ vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m(
@@ -967,7 +967,7 @@ vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m(
@@ -976,7 +976,7 @@ vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m(
@@ -985,7 +985,7 @@ vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m(
@@ -994,7 +994,7 @@ vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m(
@@ -1003,7 +1003,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m(
@@ -1012,7 +1012,7 @@ vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m(
@@ -1021,7 +1021,7 @@ vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m(
@@ -1030,7 +1030,7 @@ vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m(
@@ -1039,7 +1039,7 @@ vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m(
@@ -1048,7 +1048,7 @@ vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m(
@@ -1057,7 +1057,7 @@ vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m(
@@ -1066,6 +1066,6 @@ vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8(mask, base, bindex, vl);
+ return __riscv_vloxei8(mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c
index 3e77d3e42994..6e0b4145bae2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2(
@@ -30,7 +30,7 @@ void test_vloxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1(
@@ -43,7 +43,7 @@ void test_vloxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2(
@@ -56,7 +56,7 @@ void test_vloxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4(
@@ -69,7 +69,7 @@ void test_vloxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2(
@@ -82,7 +82,7 @@ void test_vloxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1(
@@ -95,7 +95,7 @@ void test_vloxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2(
@@ -108,7 +108,7 @@ void test_vloxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4(
@@ -121,7 +121,7 @@ void test_vloxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1(
@@ -134,7 +134,7 @@ void test_vloxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2(
@@ -147,7 +147,7 @@ void test_vloxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4(
@@ -160,7 +160,7 @@ void test_vloxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8(
@@ -173,7 +173,7 @@ void test_vloxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4(
@@ -186,7 +186,7 @@ void test_vloxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2(
@@ -199,7 +199,7 @@ void test_vloxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1(
@@ -212,7 +212,7 @@ void test_vloxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2(
@@ -225,7 +225,7 @@ void test_vloxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4(
@@ -238,7 +238,7 @@ void test_vloxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4(
@@ -251,7 +251,7 @@ void test_vloxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2(
@@ -264,7 +264,7 @@ void test_vloxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1(
@@ -277,7 +277,7 @@ void test_vloxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2(
@@ -290,7 +290,7 @@ void test_vloxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4(
@@ -303,7 +303,7 @@ void test_vloxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2(
@@ -316,7 +316,7 @@ void test_vloxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1(
@@ -329,7 +329,7 @@ void test_vloxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2(
@@ -342,7 +342,7 @@ void test_vloxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4(
@@ -355,7 +355,7 @@ void test_vloxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1(
@@ -368,7 +368,7 @@ void test_vloxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2(
@@ -381,7 +381,7 @@ void test_vloxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4(
@@ -394,7 +394,7 @@ void test_vloxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8(
@@ -407,7 +407,7 @@ void test_vloxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4(
@@ -420,7 +420,7 @@ void test_vloxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2(
@@ -433,7 +433,7 @@ void test_vloxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1(
@@ -446,7 +446,7 @@ void test_vloxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2(
@@ -459,7 +459,7 @@ void test_vloxseg2ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4(
@@ -472,7 +472,7 @@ void test_vloxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4(
@@ -485,7 +485,7 @@ void test_vloxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2(
@@ -498,7 +498,7 @@ void test_vloxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1(
@@ -511,7 +511,7 @@ void test_vloxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2(
@@ -524,7 +524,7 @@ void test_vloxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4(
@@ -537,7 +537,7 @@ void test_vloxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2(
@@ -550,7 +550,7 @@ void test_vloxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1(
@@ -563,7 +563,7 @@ void test_vloxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2(
@@ -576,7 +576,7 @@ void test_vloxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4(
@@ -589,7 +589,7 @@ void test_vloxseg2ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1(
@@ -602,7 +602,7 @@ void test_vloxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2(
@@ -615,7 +615,7 @@ void test_vloxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4(
@@ -628,7 +628,7 @@ void test_vloxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_m(
@@ -641,7 +641,7 @@ void test_vloxseg2ei16_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_m(
@@ -654,7 +654,7 @@ void test_vloxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m(
@@ -667,7 +667,7 @@ void test_vloxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_m(
@@ -680,7 +680,7 @@ void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_m(
@@ -693,7 +693,7 @@ void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m(
@@ -706,7 +706,7 @@ void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m(
@@ -719,7 +719,7 @@ void test_vloxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_m(
@@ -732,7 +732,7 @@ void test_vloxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_m(
@@ -745,7 +745,7 @@ void test_vloxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_m(
@@ -758,7 +758,7 @@ void test_vloxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_m(
@@ -771,7 +771,7 @@ void test_vloxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_m(
@@ -784,7 +784,7 @@ void test_vloxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m(
@@ -797,7 +797,7 @@ void test_vloxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m(
@@ -810,7 +810,7 @@ void test_vloxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m(
@@ -823,7 +823,7 @@ void test_vloxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m(
@@ -836,7 +836,7 @@ void test_vloxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_m(
@@ -849,7 +849,7 @@ void test_vloxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_m(
@@ -862,7 +862,7 @@ void test_vloxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m(
@@ -875,7 +875,7 @@ void test_vloxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m(
@@ -888,7 +888,7 @@ void test_vloxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vloxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_m(
@@ -914,7 +914,7 @@ void test_vloxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_m(
@@ -927,7 +927,7 @@ void test_vloxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m(
@@ -940,7 +940,7 @@ void test_vloxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m(
@@ -953,7 +953,7 @@ void test_vloxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_m(
@@ -966,7 +966,7 @@ void test_vloxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m(
@@ -979,7 +979,7 @@ void test_vloxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_m(
@@ -992,7 +992,7 @@ void test_vloxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_m(
@@ -1005,7 +1005,7 @@ void test_vloxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_m(
@@ -1018,7 +1018,7 @@ void test_vloxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m(
@@ -1031,7 +1031,7 @@ void test_vloxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m(
@@ -1044,7 +1044,7 @@ void test_vloxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m(
@@ -1057,7 +1057,7 @@ void test_vloxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m(
@@ -1070,7 +1070,7 @@ void test_vloxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_m(
@@ -1083,7 +1083,7 @@ void test_vloxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_m(
@@ -1096,7 +1096,7 @@ void test_vloxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m(
@@ -1109,7 +1109,7 @@ void test_vloxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m(
@@ -1122,7 +1122,7 @@ void test_vloxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m(
@@ -1135,7 +1135,7 @@ void test_vloxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_m(
@@ -1148,7 +1148,7 @@ void test_vloxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m(
@@ -1161,7 +1161,7 @@ void test_vloxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m(
@@ -1174,7 +1174,7 @@ void test_vloxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m(
@@ -1187,7 +1187,7 @@ void test_vloxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_m(
@@ -1200,7 +1200,7 @@ void test_vloxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_m(
@@ -1213,7 +1213,7 @@ void test_vloxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_m(
@@ -1226,7 +1226,7 @@ void test_vloxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_m(
@@ -1239,7 +1239,7 @@ void test_vloxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_m(
@@ -1252,6 +1252,6 @@ void test_vloxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}
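(For context, a minimal usage sketch — not part of the patch — of how the renamed masked overload is called from user code. The helper name load_pair_masked is hypothetical; the parameter types and call shape mirror test_vloxseg2ei16_v_i32mf2_m above, and it assumes a toolchain providing <riscv_vector.h> with the RVV extension enabled.)

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void load_pair_masked(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask,
                      const int32_t *base, vuint16mf4_t bindex, size_t vl) {
  // Overload resolution selects the i32mf2 masked variant from the operand
  // types; only the __riscv_ prefix is new relative to the old spelling.
  __riscv_vloxseg2ei16(v0, v1, mask, base, bindex, vl);
}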
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c
index 01302f2de636..73c00f3dede6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2(
@@ -30,7 +30,7 @@ void test_vloxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1(
@@ -43,7 +43,7 @@ void test_vloxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2(
@@ -56,7 +56,7 @@ void test_vloxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4(
@@ -69,7 +69,7 @@ void test_vloxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2(
@@ -82,7 +82,7 @@ void test_vloxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1(
@@ -95,7 +95,7 @@ void test_vloxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2(
@@ -108,7 +108,7 @@ void test_vloxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4(
@@ -121,7 +121,7 @@ void test_vloxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1(
@@ -134,7 +134,7 @@ void test_vloxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2(
@@ -147,7 +147,7 @@ void test_vloxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4(
@@ -160,7 +160,7 @@ void test_vloxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8(
@@ -173,7 +173,7 @@ void test_vloxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4(
@@ -186,7 +186,7 @@ void test_vloxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2(
@@ -199,7 +199,7 @@ void test_vloxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1(
@@ -212,7 +212,7 @@ void test_vloxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2(
@@ -225,7 +225,7 @@ void test_vloxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4(
@@ -238,7 +238,7 @@ void test_vloxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2(
@@ -251,7 +251,7 @@ void test_vloxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1(
@@ -264,7 +264,7 @@ void test_vloxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2(
@@ -277,7 +277,7 @@ void test_vloxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4(
@@ -290,7 +290,7 @@ void test_vloxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2(
@@ -303,7 +303,7 @@ void test_vloxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1(
@@ -316,7 +316,7 @@ void test_vloxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2(
@@ -329,7 +329,7 @@ void test_vloxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4(
@@ -342,7 +342,7 @@ void test_vloxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1(
@@ -355,7 +355,7 @@ void test_vloxseg2ei32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2(
@@ -368,7 +368,7 @@ void test_vloxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4(
@@ -381,7 +381,7 @@ void test_vloxseg2ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8(
@@ -394,7 +394,7 @@ void test_vloxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4(
@@ -407,7 +407,7 @@ void test_vloxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2(
@@ -420,7 +420,7 @@ void test_vloxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1(
@@ -433,7 +433,7 @@ void test_vloxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2(
@@ -446,7 +446,7 @@ void test_vloxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4(
@@ -459,7 +459,7 @@ void test_vloxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2(
@@ -472,7 +472,7 @@ void test_vloxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1(
@@ -485,7 +485,7 @@ void test_vloxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2(
@@ -498,7 +498,7 @@ void test_vloxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4(
@@ -511,7 +511,7 @@ void test_vloxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2(
@@ -524,7 +524,7 @@ void test_vloxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1(
@@ -537,7 +537,7 @@ void test_vloxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2(
@@ -550,7 +550,7 @@ void test_vloxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4(
@@ -563,7 +563,7 @@ void test_vloxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1(
@@ -576,7 +576,7 @@ void test_vloxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2(
@@ -589,7 +589,7 @@ void test_vloxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4(
@@ -602,7 +602,7 @@ void test_vloxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m(
@@ -615,7 +615,7 @@ void test_vloxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_m(
@@ -628,7 +628,7 @@ void test_vloxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m(
@@ -641,7 +641,7 @@ void test_vloxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_m(
@@ -654,7 +654,7 @@ void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_m(
@@ -667,7 +667,7 @@ void test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m(
@@ -680,7 +680,7 @@ void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m(
@@ -693,7 +693,7 @@ void test_vloxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_m(
@@ -706,7 +706,7 @@ void test_vloxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_m(
@@ -719,7 +719,7 @@ void test_vloxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_m(
@@ -732,7 +732,7 @@ void test_vloxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_m(
@@ -745,7 +745,7 @@ void test_vloxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_m(
@@ -758,7 +758,7 @@ void test_vloxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_m(
@@ -771,7 +771,7 @@ void test_vloxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m(
@@ -784,7 +784,7 @@ void test_vloxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m(
@@ -797,7 +797,7 @@ void test_vloxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m(
@@ -810,7 +810,7 @@ void test_vloxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_m(
@@ -823,7 +823,7 @@ void test_vloxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m(
@@ -836,7 +836,7 @@ void test_vloxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m(
@@ -849,7 +849,7 @@ void test_vloxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m(
@@ -862,7 +862,7 @@ void test_vloxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_m(
@@ -875,7 +875,7 @@ void test_vloxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_m(
@@ -888,7 +888,7 @@ void test_vloxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m(
@@ -901,7 +901,7 @@ void test_vloxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m(
@@ -914,7 +914,7 @@ void test_vloxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_m(
@@ -927,7 +927,7 @@ void test_vloxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_m(
@@ -940,7 +940,7 @@ void test_vloxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_m(
@@ -953,7 +953,7 @@ void test_vloxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_m(
@@ -966,7 +966,7 @@ void test_vloxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_m(
@@ -979,7 +979,7 @@ void test_vloxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m(
@@ -992,7 +992,7 @@ void test_vloxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m(
@@ -1005,7 +1005,7 @@ void test_vloxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m(
@@ -1018,7 +1018,7 @@ void test_vloxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m(
@@ -1031,7 +1031,7 @@ void test_vloxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m(
@@ -1044,7 +1044,7 @@ void test_vloxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m(
@@ -1057,7 +1057,7 @@ void test_vloxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m(
@@ -1070,7 +1070,7 @@ void test_vloxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m(
@@ -1083,7 +1083,7 @@ void test_vloxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_m(
@@ -1096,7 +1096,7 @@ void test_vloxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_m(
@@ -1109,7 +1109,7 @@ void test_vloxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m(
@@ -1122,7 +1122,7 @@ void test_vloxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m(
@@ -1135,7 +1135,7 @@ void test_vloxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_m(
@@ -1148,7 +1148,7 @@ void test_vloxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_m(
@@ -1161,7 +1161,7 @@ void test_vloxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_m(
@@ -1174,7 +1174,7 @@ void test_vloxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_m(
@@ -1187,7 +1187,7 @@ void test_vloxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_m(
@@ -1200,6 +1200,6 @@ void test_vloxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
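(Likewise, a hedged sketch of the unmasked overload, matching test_vloxseg2ei32_v_f32m1 above; load_pair is an illustrative name only.)

#include <riscv_vector.h>
#include <stddef.h>

void load_pair(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base,
               vuint32m1_t bindex, size_t vl) {
  // Indexed segment load of two fields per element, 32-bit indices; the
  // unmasked overload takes no vbool operand.
  __riscv_vloxseg2ei32(v0, v1, base, bindex, vl);
}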
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c
index 47ddf9de5772..b61b1a68b6f5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2(
@@ -30,7 +30,7 @@ void test_vloxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1(
@@ -43,7 +43,7 @@ void test_vloxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2(
@@ -56,7 +56,7 @@ void test_vloxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2(
@@ -69,7 +69,7 @@ void test_vloxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1(
@@ -82,7 +82,7 @@ void test_vloxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2(
@@ -95,7 +95,7 @@ void test_vloxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4(
@@ -108,7 +108,7 @@ void test_vloxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1(
@@ -121,7 +121,7 @@ void test_vloxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2(
@@ -134,7 +134,7 @@ void test_vloxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4(
@@ -147,7 +147,7 @@ void test_vloxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8(
@@ -160,7 +160,7 @@ void test_vloxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4(
@@ -173,7 +173,7 @@ void test_vloxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2(
@@ -186,7 +186,7 @@ void test_vloxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1(
@@ -199,7 +199,7 @@ void test_vloxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4(
@@ -212,7 +212,7 @@ void test_vloxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2(
@@ -225,7 +225,7 @@ void test_vloxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1(
@@ -238,7 +238,7 @@ void test_vloxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2(
@@ -251,7 +251,7 @@ void test_vloxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2(
@@ -264,7 +264,7 @@ void test_vloxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1(
@@ -277,7 +277,7 @@ void test_vloxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2(
@@ -290,7 +290,7 @@ void test_vloxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4(
@@ -303,7 +303,7 @@ void test_vloxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1(
@@ -316,7 +316,7 @@ void test_vloxseg2ei64_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2(
@@ -329,7 +329,7 @@ void test_vloxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4(
@@ -342,7 +342,7 @@ void test_vloxseg2ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8(
@@ -355,7 +355,7 @@ void test_vloxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4(
@@ -368,7 +368,7 @@ void test_vloxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2(
@@ -381,7 +381,7 @@ void test_vloxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1(
@@ -394,7 +394,7 @@ void test_vloxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4(
@@ -407,7 +407,7 @@ void test_vloxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2(
@@ -420,7 +420,7 @@ void test_vloxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1(
@@ -433,7 +433,7 @@ void test_vloxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2(
@@ -446,7 +446,7 @@ void test_vloxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2(
@@ -459,7 +459,7 @@ void test_vloxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1(
@@ -472,7 +472,7 @@ void test_vloxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2(
@@ -485,7 +485,7 @@ void test_vloxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4(
@@ -498,7 +498,7 @@ void test_vloxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1(
@@ -511,7 +511,7 @@ void test_vloxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2(
@@ -524,7 +524,7 @@ void test_vloxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4(
@@ -537,7 +537,7 @@ void test_vloxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_m(
@@ -550,7 +550,7 @@ void test_vloxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_m(
@@ -563,7 +563,7 @@ void test_vloxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m(
@@ -576,7 +576,7 @@ void test_vloxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m(
@@ -589,7 +589,7 @@ void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m(
@@ -602,7 +602,7 @@ void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m(
@@ -615,7 +615,7 @@ void test_vloxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_m(
@@ -628,7 +628,7 @@ void test_vloxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_m(
@@ -641,7 +641,7 @@ void test_vloxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_m(
@@ -654,7 +654,7 @@ void test_vloxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_m(
@@ -667,7 +667,7 @@ void test_vloxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_m(
@@ -680,7 +680,7 @@ void test_vloxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m(
@@ -693,7 +693,7 @@ void test_vloxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m(
@@ -706,7 +706,7 @@ void test_vloxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m(
@@ -719,7 +719,7 @@ void test_vloxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m(
@@ -732,7 +732,7 @@ void test_vloxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m(
@@ -745,7 +745,7 @@ void test_vloxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m(
@@ -758,7 +758,7 @@ void test_vloxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m(
@@ -771,7 +771,7 @@ void test_vloxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_m(
@@ -784,7 +784,7 @@ void test_vloxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m(
@@ -797,7 +797,7 @@ void test_vloxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m(
@@ -810,7 +810,7 @@ void test_vloxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m(
@@ -823,7 +823,7 @@ void test_vloxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m(
@@ -836,7 +836,7 @@ void test_vloxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_m(
@@ -849,7 +849,7 @@ void test_vloxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_m(
@@ -862,7 +862,7 @@ void test_vloxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_m(
@@ -875,7 +875,7 @@ void test_vloxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m(
@@ -888,7 +888,7 @@ void test_vloxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m(
@@ -901,7 +901,7 @@ void test_vloxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m(
@@ -914,7 +914,7 @@ void test_vloxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m(
@@ -927,7 +927,7 @@ void test_vloxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m(
@@ -940,7 +940,7 @@ void test_vloxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m(
@@ -953,7 +953,7 @@ void test_vloxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m(
@@ -966,7 +966,7 @@ void test_vloxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_m(
@@ -979,7 +979,7 @@ void test_vloxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m(
@@ -992,7 +992,7 @@ void test_vloxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m(
@@ -1005,7 +1005,7 @@ void test_vloxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_m(
@@ -1018,7 +1018,7 @@ void test_vloxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_m(
@@ -1031,7 +1031,7 @@ void test_vloxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_m(
@@ -1044,7 +1044,7 @@ void test_vloxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_m(
@@ -1057,7 +1057,7 @@ void test_vloxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m(
@@ -1070,6 +1070,6 @@ void test_vloxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei64(v0, v1, mask, base, bindex, vl);
}
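
(Usage note, not part of the patch: the overloaded `__riscv_vloxseg2ei64` calls exercised above write two destination registers through pointers, take an index vector of byte offsets, and an explicit `vl`. Below is a minimal sketch of calling the prefixed overloaded form from user code, assuming an RVV-enabled toolchain and `<riscv_vector.h>`; the helper name, buffers, and the `-march=rv64gcv` setup are illustrative, not taken from this commit.)

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Gather two-element segments of int32 data from `base` at the byte
 * offsets in `offsets`, splitting field 0 into out0 and field 1 into
 * out1. Illustrative only; the call matches test_vloxseg2ei64_v_i32m1
 * above (i32m1 data pairs with a u64m2 index vector). */
static void gather_pairs(const int32_t *base, const uint64_t *offsets,
                         int32_t *out0, int32_t *out1, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vuint64m2_t bindex = __riscv_vle64_v_u64m2(offsets + i, vl);
    vint32m1_t v0, v1;
    /* Overloaded form: the suffixless name resolves on operand types. */
    __riscv_vloxseg2ei64(&v0, &v1, base, bindex, vl);
    __riscv_vse32_v_i32m1(out0 + i, v0, vl);
    __riscv_vse32_v_i32m1(out1 + i, v1, vl);
    i += vl;
  }
}
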
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c
index 16631371aed5..b7b186bd2cbe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2(
@@ -30,7 +30,7 @@ void test_vloxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Floa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1(
@@ -43,7 +43,7 @@ void test_vloxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Floa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2(
@@ -56,7 +56,7 @@ void test_vloxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4(
@@ -69,7 +69,7 @@ void test_vloxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2(
@@ -82,7 +82,7 @@ void test_vloxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1(
@@ -95,7 +95,7 @@ void test_vloxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2(
@@ -108,7 +108,7 @@ void test_vloxseg2ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *b
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4(
@@ -121,7 +121,7 @@ void test_vloxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *b
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1(
@@ -134,7 +134,7 @@ void test_vloxseg2ei8_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *b
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2(
@@ -147,7 +147,7 @@ void test_vloxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4(
@@ -160,7 +160,7 @@ void test_vloxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8(
@@ -173,7 +173,7 @@ void test_vloxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4(
@@ -186,7 +186,7 @@ void test_vloxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2(
@@ -199,7 +199,7 @@ void test_vloxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1(
@@ -212,7 +212,7 @@ void test_vloxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2(
@@ -225,7 +225,7 @@ void test_vloxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4(
@@ -238,7 +238,7 @@ void test_vloxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4(
@@ -251,7 +251,7 @@ void test_vloxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2(
@@ -264,7 +264,7 @@ void test_vloxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1(
@@ -277,7 +277,7 @@ void test_vloxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2(
@@ -290,7 +290,7 @@ void test_vloxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4(
@@ -303,7 +303,7 @@ void test_vloxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2(
@@ -316,7 +316,7 @@ void test_vloxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1(
@@ -329,7 +329,7 @@ void test_vloxseg2ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2(
@@ -342,7 +342,7 @@ void test_vloxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4(
@@ -355,7 +355,7 @@ void test_vloxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1(
@@ -368,7 +368,7 @@ void test_vloxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2(
@@ -381,7 +381,7 @@ void test_vloxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4(
@@ -394,7 +394,7 @@ void test_vloxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8(
@@ -407,7 +407,7 @@ void test_vloxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4(
@@ -420,7 +420,7 @@ void test_vloxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *b
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2(
@@ -433,7 +433,7 @@ void test_vloxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *b
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1(
@@ -446,7 +446,7 @@ void test_vloxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *b
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2(
@@ -459,7 +459,7 @@ void test_vloxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4(
@@ -472,7 +472,7 @@ void test_vloxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4(
@@ -485,7 +485,7 @@ void test_vloxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2(
@@ -498,7 +498,7 @@ void test_vloxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1(
@@ -511,7 +511,7 @@ void test_vloxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2(
@@ -524,7 +524,7 @@ void test_vloxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4(
@@ -537,7 +537,7 @@ void test_vloxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2(
@@ -550,7 +550,7 @@ void test_vloxseg2ei8_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1(
@@ -563,7 +563,7 @@ void test_vloxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2(
@@ -576,7 +576,7 @@ void test_vloxseg2ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4(
@@ -589,7 +589,7 @@ void test_vloxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1(
@@ -602,7 +602,7 @@ void test_vloxseg2ei8_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2(
@@ -615,7 +615,7 @@ void test_vloxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4(
@@ -628,7 +628,7 @@ void test_vloxseg2ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m(
@@ -641,7 +641,7 @@ void test_vloxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_m(
@@ -654,7 +654,7 @@ void test_vloxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m(
@@ -667,7 +667,7 @@ void test_vloxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_m(
@@ -680,7 +680,7 @@ void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_m(
@@ -693,7 +693,7 @@ void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m(
@@ -706,7 +706,7 @@ void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m(
@@ -719,7 +719,7 @@ void test_vloxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_m(
@@ -732,7 +732,7 @@ void test_vloxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_m(
@@ -745,7 +745,7 @@ void test_vloxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_m(
@@ -758,7 +758,7 @@ void test_vloxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_m(
@@ -771,7 +771,7 @@ void test_vloxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_m(
@@ -784,7 +784,7 @@ void test_vloxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m(
@@ -797,7 +797,7 @@ void test_vloxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m(
@@ -810,7 +810,7 @@ void test_vloxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m(
@@ -823,7 +823,7 @@ void test_vloxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_m(
@@ -836,7 +836,7 @@ void test_vloxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_m(
@@ -849,7 +849,7 @@ void test_vloxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, cons
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_m(
@@ -862,7 +862,7 @@ void test_vloxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, cons
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m(
@@ -875,7 +875,7 @@ void test_vloxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, cons
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m(
@@ -888,7 +888,7 @@ void test_vloxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vloxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_m(
@@ -914,7 +914,7 @@ void test_vloxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_m(
@@ -927,7 +927,7 @@ void test_vloxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m(
@@ -940,7 +940,7 @@ void test_vloxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m(
@@ -953,7 +953,7 @@ void test_vloxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_m(
@@ -966,7 +966,7 @@ void test_vloxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_m(
@@ -979,7 +979,7 @@ void test_vloxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m(
@@ -992,7 +992,7 @@ void test_vloxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_m(
@@ -1005,7 +1005,7 @@ void test_vloxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_m(
@@ -1018,7 +1018,7 @@ void test_vloxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m(
@@ -1031,7 +1031,7 @@ void test_vloxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m(
@@ -1044,7 +1044,7 @@ void test_vloxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m(
@@ -1057,7 +1057,7 @@ void test_vloxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m(
@@ -1070,7 +1070,7 @@ void test_vloxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_m(
@@ -1083,7 +1083,7 @@ void test_vloxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m(
@@ -1096,7 +1096,7 @@ void test_vloxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m(
@@ -1109,7 +1109,7 @@ void test_vloxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m(
@@ -1122,7 +1122,7 @@ void test_vloxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m(
@@ -1135,7 +1135,7 @@ void test_vloxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_m(
@@ -1148,7 +1148,7 @@ void test_vloxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m(
@@ -1161,7 +1161,7 @@ void test_vloxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m(
@@ -1174,7 +1174,7 @@ void test_vloxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m(
@@ -1187,7 +1187,7 @@ void test_vloxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_m(
@@ -1200,7 +1200,7 @@ void test_vloxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m(
@@ -1213,7 +1213,7 @@ void test_vloxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_m(
@@ -1226,7 +1226,7 @@ void test_vloxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_m(
@@ -1239,7 +1239,7 @@ void test_vloxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_m(
@@ -1252,6 +1252,6 @@ void test_vloxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
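For reference, a minimal caller-side sketch of the renamed masked overload, mirroring the test signatures above (the wrapper name load_seg2 and the -march flag are illustrative, not part of the patch):

#include <riscv_vector.h>
#include <stddef.h>

// Compile with RVV intrinsics enabled, e.g. clang -march=rv64gcv.
// The overloaded form resolves on argument types; only the __riscv_
// prefix distinguishes the new spelling from the old vloxseg2ei8.
void load_seg2(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
               const int16_t *base, vuint8mf2_t bindex, size_t vl) {
  __riscv_vloxseg2ei8(v0, v1, mask, base, bindex, vl);
}
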
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c
index 23a26fb75c56..25e7cdcedbb2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2(
@@ -34,7 +34,7 @@ void test_vloxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1(
@@ -49,7 +49,7 @@ void test_vloxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2(
@@ -64,7 +64,7 @@ void test_vloxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2(
@@ -79,7 +79,7 @@ void test_vloxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1(
@@ -94,7 +94,7 @@ void test_vloxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2(
@@ -109,7 +109,7 @@ void test_vloxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1(
@@ -124,7 +124,7 @@ void test_vloxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2(
@@ -139,7 +139,7 @@ void test_vloxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8(
@@ -154,7 +154,7 @@ void test_vloxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4(
@@ -169,7 +169,7 @@ void test_vloxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2(
@@ -184,7 +184,7 @@ void test_vloxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1(
@@ -199,7 +199,7 @@ void test_vloxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2(
@@ -214,7 +214,7 @@ void test_vloxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4(
@@ -229,7 +229,7 @@ void test_vloxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2(
@@ -244,7 +244,7 @@ void test_vloxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1(
@@ -259,7 +259,7 @@ void test_vloxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2(
@@ -274,7 +274,7 @@ void test_vloxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2(
@@ -289,7 +289,7 @@ void test_vloxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1(
@@ -304,7 +304,7 @@ void test_vloxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2(
@@ -319,7 +319,7 @@ void test_vloxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1(
@@ -334,7 +334,7 @@ void test_vloxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2(
@@ -349,7 +349,7 @@ void test_vloxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8(
@@ -364,7 +364,7 @@ void test_vloxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4(
@@ -379,7 +379,7 @@ void test_vloxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2(
@@ -394,7 +394,7 @@ void test_vloxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1(
@@ -409,7 +409,7 @@ void test_vloxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2(
@@ -424,7 +424,7 @@ void test_vloxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4(
@@ -439,7 +439,7 @@ void test_vloxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2(
@@ -454,7 +454,7 @@ void test_vloxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1(
@@ -469,7 +469,7 @@ void test_vloxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2(
@@ -484,7 +484,7 @@ void test_vloxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2(
@@ -499,7 +499,7 @@ void test_vloxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1(
@@ -514,7 +514,7 @@ void test_vloxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2(
@@ -529,7 +529,7 @@ void test_vloxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1(
@@ -544,7 +544,7 @@ void test_vloxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2(
@@ -559,7 +559,7 @@ void test_vloxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_m(
@@ -574,7 +574,7 @@ void test_vloxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_m(
@@ -589,7 +589,7 @@ void test_vloxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m(
@@ -604,7 +604,7 @@ void test_vloxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_m(
@@ -619,7 +619,7 @@ void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_m(
@@ -649,7 +649,7 @@ void test_vloxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_m(
@@ -664,7 +664,7 @@ void test_vloxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_m(
@@ -679,7 +679,7 @@ void test_vloxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_m(
@@ -694,7 +694,7 @@ void test_vloxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m(
@@ -709,7 +709,7 @@ void test_vloxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m(
@@ -724,7 +724,7 @@ void test_vloxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vloxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_m(
@@ -754,7 +754,7 @@ void test_vloxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_m(
@@ -769,7 +769,7 @@ void test_vloxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m(
@@ -784,7 +784,7 @@ void test_vloxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m(
@@ -799,7 +799,7 @@ void test_vloxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_m(
@@ -814,7 +814,7 @@ void test_vloxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_m(
@@ -829,7 +829,7 @@ void test_vloxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vloxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_m(
@@ -859,7 +859,7 @@ void test_vloxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_m(
@@ -874,7 +874,7 @@ void test_vloxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_m(
@@ -889,7 +889,7 @@ void test_vloxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_m(
@@ -904,7 +904,7 @@ void test_vloxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m(
@@ -919,7 +919,7 @@ void test_vloxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m(
@@ -934,7 +934,7 @@ void test_vloxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vloxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_m(
@@ -964,7 +964,7 @@ void test_vloxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_m(
@@ -979,7 +979,7 @@ void test_vloxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m(
@@ -994,7 +994,7 @@ void test_vloxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m(
@@ -1009,7 +1009,7 @@ void test_vloxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_m(
@@ -1024,7 +1024,7 @@ void test_vloxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_m(
@@ -1039,7 +1039,7 @@ void test_vloxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_m(
@@ -1069,7 +1069,7 @@ void test_vloxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_m(
@@ -1084,7 +1084,7 @@ void test_vloxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_m(
@@ -1099,7 +1099,7 @@ void test_vloxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_m(
@@ -1114,6 +1114,6 @@ void test_vloxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
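The unmasked overloads take the same prefixed name and are selected by arity alone, as in this illustrative sketch (gather3 is a hypothetical wrapper, not from the patch):

#include <riscv_vector.h>
#include <stddef.h>

// Unmasked three-field indexed segment load; matches the unmasked
// test signatures above. The call differs from the masked form only
// by the absent mask argument.
void gather3(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2,
             const float *base, vuint16mf2_t bindex, size_t vl) {
  __riscv_vloxseg3ei16(v0, v1, v2, base, bindex, vl);
}
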
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c
index 69cc75cfcf65..41256f5d93dd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2(
@@ -34,7 +34,7 @@ void test_vloxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1(
@@ -49,7 +49,7 @@ void test_vloxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2(
@@ -64,7 +64,7 @@ void test_vloxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2(
@@ -79,7 +79,7 @@ void test_vloxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1(
@@ -94,7 +94,7 @@ void test_vloxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2(
@@ -109,7 +109,7 @@ void test_vloxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1(
@@ -124,7 +124,7 @@ void test_vloxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2(
@@ -139,7 +139,7 @@ void test_vloxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8(
@@ -154,7 +154,7 @@ void test_vloxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4(
@@ -169,7 +169,7 @@ void test_vloxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2(
@@ -184,7 +184,7 @@ void test_vloxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1(
@@ -199,7 +199,7 @@ void test_vloxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2(
@@ -214,7 +214,7 @@ void test_vloxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4(
@@ -229,7 +229,7 @@ void test_vloxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2(
@@ -244,7 +244,7 @@ void test_vloxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1(
@@ -259,7 +259,7 @@ void test_vloxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2(
@@ -274,7 +274,7 @@ void test_vloxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2(
@@ -289,7 +289,7 @@ void test_vloxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1(
@@ -304,7 +304,7 @@ void test_vloxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2(
@@ -319,7 +319,7 @@ void test_vloxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1(
@@ -334,7 +334,7 @@ void test_vloxseg3ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2(
@@ -349,7 +349,7 @@ void test_vloxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8(
@@ -364,7 +364,7 @@ void test_vloxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4(
@@ -379,7 +379,7 @@ void test_vloxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2(
@@ -394,7 +394,7 @@ void test_vloxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1(
@@ -409,7 +409,7 @@ void test_vloxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2(
@@ -424,7 +424,7 @@ void test_vloxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4(
@@ -439,7 +439,7 @@ void test_vloxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2(
@@ -454,7 +454,7 @@ void test_vloxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1(
@@ -469,7 +469,7 @@ void test_vloxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2(
@@ -484,7 +484,7 @@ void test_vloxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2(
@@ -499,7 +499,7 @@ void test_vloxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1(
@@ -514,7 +514,7 @@ void test_vloxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2(
@@ -529,7 +529,7 @@ void test_vloxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1(
@@ -544,7 +544,7 @@ void test_vloxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2(
@@ -559,7 +559,7 @@ void test_vloxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_m(
@@ -574,7 +574,7 @@ void test_vloxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_m(
@@ -589,7 +589,7 @@ void test_vloxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_m(
@@ -604,7 +604,7 @@ void test_vloxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_m(
@@ -619,7 +619,7 @@ void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_m(
@@ -649,7 +649,7 @@ void test_vloxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_m(
@@ -664,7 +664,7 @@ void test_vloxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_m(
@@ -679,7 +679,7 @@ void test_vloxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_m(
@@ -694,7 +694,7 @@ void test_vloxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m(
@@ -709,7 +709,7 @@ void test_vloxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m(
@@ -724,7 +724,7 @@ void test_vloxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vloxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m(
@@ -754,7 +754,7 @@ void test_vloxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_m(
@@ -769,7 +769,7 @@ void test_vloxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m(
@@ -784,7 +784,7 @@ void test_vloxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m(
@@ -799,7 +799,7 @@ void test_vloxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_m(
@@ -814,7 +814,7 @@ void test_vloxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_m(
@@ -829,7 +829,7 @@ void test_vloxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vloxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_m(
@@ -859,7 +859,7 @@ void test_vloxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_m(
@@ -874,7 +874,7 @@ void test_vloxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_m(
@@ -889,7 +889,7 @@ void test_vloxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_m(
@@ -904,7 +904,7 @@ void test_vloxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m(
@@ -919,7 +919,7 @@ void test_vloxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m(
@@ -934,7 +934,7 @@ void test_vloxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vloxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m(
@@ -964,7 +964,7 @@ void test_vloxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m(
@@ -979,7 +979,7 @@ void test_vloxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m(
@@ -994,7 +994,7 @@ void test_vloxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m(
@@ -1009,7 +1009,7 @@ void test_vloxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_m(
@@ -1024,7 +1024,7 @@ void test_vloxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_m(
@@ -1039,7 +1039,7 @@ void test_vloxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_m(
@@ -1069,7 +1069,7 @@ void test_vloxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_m(
@@ -1084,7 +1084,7 @@ void test_vloxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_m(
@@ -1099,7 +1099,7 @@ void test_vloxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_m(
@@ -1114,6 +1114,6 @@ void test_vloxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c
index b0a69dd39a58..33300836a348 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2(
@@ -34,7 +34,7 @@ void test_vloxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1(
@@ -49,7 +49,7 @@ void test_vloxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2(
@@ -64,7 +64,7 @@ void test_vloxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2(
@@ -79,7 +79,7 @@ void test_vloxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1(
@@ -94,7 +94,7 @@ void test_vloxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2(
@@ -109,7 +109,7 @@ void test_vloxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1(
@@ -124,7 +124,7 @@ void test_vloxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2(
@@ -139,7 +139,7 @@ void test_vloxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8(
@@ -154,7 +154,7 @@ void test_vloxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4(
@@ -169,7 +169,7 @@ void test_vloxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2(
@@ -184,7 +184,7 @@ void test_vloxseg3ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1(
@@ -199,7 +199,7 @@ void test_vloxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4(
@@ -214,7 +214,7 @@ void test_vloxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2(
@@ -229,7 +229,7 @@ void test_vloxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1(
@@ -244,7 +244,7 @@ void test_vloxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2(
@@ -259,7 +259,7 @@ void test_vloxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2(
@@ -274,7 +274,7 @@ void test_vloxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1(
@@ -289,7 +289,7 @@ void test_vloxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2(
@@ -304,7 +304,7 @@ void test_vloxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1(
@@ -319,7 +319,7 @@ void test_vloxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2(
@@ -334,7 +334,7 @@ void test_vloxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8(
@@ -349,7 +349,7 @@ void test_vloxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4(
@@ -364,7 +364,7 @@ void test_vloxseg3ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2(
@@ -379,7 +379,7 @@ void test_vloxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1(
@@ -394,7 +394,7 @@ void test_vloxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4(
@@ -409,7 +409,7 @@ void test_vloxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2(
@@ -424,7 +424,7 @@ void test_vloxseg3ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1(
@@ -439,7 +439,7 @@ void test_vloxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2(
@@ -454,7 +454,7 @@ void test_vloxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2(
@@ -469,7 +469,7 @@ void test_vloxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1(
@@ -484,7 +484,7 @@ void test_vloxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2(
@@ -499,7 +499,7 @@ void test_vloxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1(
@@ -514,7 +514,7 @@ void test_vloxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2(
@@ -529,7 +529,7 @@ void test_vloxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_m(
@@ -544,7 +544,7 @@ void test_vloxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_m(
@@ -559,7 +559,7 @@ void test_vloxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_m(
@@ -574,7 +574,7 @@ void test_vloxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m(
@@ -589,7 +589,7 @@ void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m(
@@ -604,7 +604,7 @@ void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_m(
@@ -619,7 +619,7 @@ void test_vloxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_m(
@@ -634,7 +634,7 @@ void test_vloxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_m(
@@ -649,7 +649,7 @@ void test_vloxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_m(
@@ -664,7 +664,7 @@ void test_vloxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m(
@@ -679,7 +679,7 @@ void test_vloxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m(
@@ -694,7 +694,7 @@ void test_vloxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m(
@@ -709,7 +709,7 @@ void test_vloxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_m(
@@ -724,7 +724,7 @@ void test_vloxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m(
@@ -739,7 +739,7 @@ void test_vloxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m(
@@ -754,7 +754,7 @@ void test_vloxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m(
@@ -769,7 +769,7 @@ void test_vloxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_m(
@@ -784,7 +784,7 @@ void test_vloxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m(
@@ -799,7 +799,7 @@ void test_vloxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_m(
@@ -814,7 +814,7 @@ void test_vloxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m(
@@ -829,7 +829,7 @@ void test_vloxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_m(
@@ -844,7 +844,7 @@ void test_vloxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_m(
@@ -859,7 +859,7 @@ void test_vloxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m(
@@ -874,7 +874,7 @@ void test_vloxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m(
@@ -889,7 +889,7 @@ void test_vloxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m(
@@ -904,7 +904,7 @@ void test_vloxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_m(
@@ -919,7 +919,7 @@ void test_vloxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m(
@@ -934,7 +934,7 @@ void test_vloxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m(
@@ -949,7 +949,7 @@ void test_vloxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_m(
@@ -964,7 +964,7 @@ void test_vloxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_m(
@@ -979,7 +979,7 @@ void test_vloxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m(
@@ -994,7 +994,7 @@ void test_vloxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_m(
@@ -1009,7 +1009,7 @@ void test_vloxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_m(
@@ -1024,7 +1024,7 @@ void test_vloxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_m(
@@ -1039,7 +1039,7 @@ void test_vloxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_m(
@@ -1054,6 +1054,6 @@ void test_vloxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c
index 213940f013db..46f2e33b8153 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2(
@@ -34,7 +34,7 @@ void test_vloxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1(
@@ -49,7 +49,7 @@ void test_vloxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2(
@@ -64,7 +64,7 @@ void test_vloxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2(
@@ -79,7 +79,7 @@ void test_vloxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1(
@@ -94,7 +94,7 @@ void test_vloxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2(
@@ -109,7 +109,7 @@ void test_vloxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1(
@@ -124,7 +124,7 @@ void test_vloxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2(
@@ -139,7 +139,7 @@ void test_vloxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8(
@@ -154,7 +154,7 @@ void test_vloxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4(
@@ -169,7 +169,7 @@ void test_vloxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2(
@@ -184,7 +184,7 @@ void test_vloxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1(
@@ -199,7 +199,7 @@ void test_vloxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2(
@@ -214,7 +214,7 @@ void test_vloxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4(
@@ -229,7 +229,7 @@ void test_vloxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2(
@@ -244,7 +244,7 @@ void test_vloxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1(
@@ -259,7 +259,7 @@ void test_vloxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2(
@@ -274,7 +274,7 @@ void test_vloxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2(
@@ -289,7 +289,7 @@ void test_vloxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1(
@@ -304,7 +304,7 @@ void test_vloxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2(
@@ -319,7 +319,7 @@ void test_vloxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1(
@@ -334,7 +334,7 @@ void test_vloxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2(
@@ -349,7 +349,7 @@ void test_vloxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8(
@@ -364,7 +364,7 @@ void test_vloxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4(
@@ -379,7 +379,7 @@ void test_vloxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2(
@@ -394,7 +394,7 @@ void test_vloxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1(
@@ -409,7 +409,7 @@ void test_vloxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2(
@@ -424,7 +424,7 @@ void test_vloxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, con
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4(
@@ -439,7 +439,7 @@ void test_vloxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, con
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2(
@@ -454,7 +454,7 @@ void test_vloxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1(
@@ -469,7 +469,7 @@ void test_vloxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2(
@@ -484,7 +484,7 @@ void test_vloxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2(
@@ -499,7 +499,7 @@ void test_vloxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1(
@@ -514,7 +514,7 @@ void test_vloxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2(
@@ -529,7 +529,7 @@ void test_vloxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1(
@@ -544,7 +544,7 @@ void test_vloxseg3ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2(
@@ -559,7 +559,7 @@ void test_vloxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_m(
@@ -574,7 +574,7 @@ void test_vloxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_m(
@@ -589,7 +589,7 @@ void test_vloxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_m(
@@ -604,7 +604,7 @@ void test_vloxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_m(
@@ -619,7 +619,7 @@ void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_m(
@@ -649,7 +649,7 @@ void test_vloxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_m(
@@ -664,7 +664,7 @@ void test_vloxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_m(
@@ -679,7 +679,7 @@ void test_vloxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_m(
@@ -694,7 +694,7 @@ void test_vloxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m(
@@ -709,7 +709,7 @@ void test_vloxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m(
@@ -724,7 +724,7 @@ void test_vloxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vloxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_m(
@@ -754,7 +754,7 @@ void test_vloxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_m(
@@ -769,7 +769,7 @@ void test_vloxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m(
@@ -784,7 +784,7 @@ void test_vloxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m(
@@ -799,7 +799,7 @@ void test_vloxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m(
@@ -814,7 +814,7 @@ void test_vloxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_m(
@@ -829,7 +829,7 @@ void test_vloxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vloxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_m(
@@ -859,7 +859,7 @@ void test_vloxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_m(
@@ -874,7 +874,7 @@ void test_vloxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m(
@@ -889,7 +889,7 @@ void test_vloxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_m(
@@ -904,7 +904,7 @@ void test_vloxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m(
@@ -919,7 +919,7 @@ void test_vloxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m(
@@ -934,7 +934,7 @@ void test_vloxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vloxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_m(
@@ -964,7 +964,7 @@ void test_vloxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_m(
@@ -979,7 +979,7 @@ void test_vloxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m(
@@ -994,7 +994,7 @@ void test_vloxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m(
@@ -1009,7 +1009,7 @@ void test_vloxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_m(
@@ -1024,7 +1024,7 @@ void test_vloxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_m(
@@ -1039,7 +1039,7 @@ void test_vloxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_m(
@@ -1069,7 +1069,7 @@ void test_vloxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_m(
@@ -1084,7 +1084,7 @@ void test_vloxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_m(
@@ -1099,7 +1099,7 @@ void test_vloxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_m(
@@ -1114,6 +1114,6 @@ void test_vloxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
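// NOTE (editorial, not part of the patch): a minimal usage sketch of the renamed
// overloaded intrinsic. The helper name gather3_i32m1 is hypothetical; the
// signature is taken verbatim from the i32m1 test above. Overload resolution
// selects the i32m1 variant from the operand types, exactly as the unprefixed
// vloxseg3ei8(...) spelling did before this rename.
//
// #include <riscv_vector.h>
//
// void gather3_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
//                    const int32_t *base, vuint8mf4_t bindex, size_t vl) {
//   // Indexed (ordered) 3-field segment load through the __riscv_-prefixed
//   // overloaded name; the non-overloaded form would spell out the full type
//   // suffix, e.g. __riscv_vloxseg3ei8_v_i32m1.
//   __riscv_vloxseg3ei8(v0, v1, v2, base, bindex, vl);
// }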
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c
index a3b4749d36b7..876fd70662ee 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2(
@@ -38,7 +38,7 @@ void test_vloxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1(
@@ -55,7 +55,7 @@ void test_vloxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2(
@@ -72,7 +72,7 @@ void test_vloxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2(
@@ -89,7 +89,7 @@ void test_vloxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1(
@@ -106,7 +106,7 @@ void test_vloxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2(
@@ -123,7 +123,7 @@ void test_vloxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1(
@@ -140,7 +140,7 @@ void test_vloxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2(
@@ -157,7 +157,7 @@ void test_vloxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8(
@@ -174,7 +174,7 @@ void test_vloxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4(
@@ -191,7 +191,7 @@ void test_vloxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2(
@@ -208,7 +208,7 @@ void test_vloxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1(
@@ -225,7 +225,7 @@ void test_vloxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2(
@@ -242,7 +242,7 @@ void test_vloxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4(
@@ -259,7 +259,7 @@ void test_vloxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2(
@@ -276,7 +276,7 @@ void test_vloxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1(
@@ -293,7 +293,7 @@ void test_vloxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2(
@@ -310,7 +310,7 @@ void test_vloxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2(
@@ -327,7 +327,7 @@ void test_vloxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1(
@@ -344,7 +344,7 @@ void test_vloxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2(
@@ -361,7 +361,7 @@ void test_vloxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1(
@@ -378,7 +378,7 @@ void test_vloxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2(
@@ -395,7 +395,7 @@ void test_vloxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8(
@@ -412,7 +412,7 @@ void test_vloxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4(
@@ -429,7 +429,7 @@ void test_vloxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2(
@@ -446,7 +446,7 @@ void test_vloxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1(
@@ -463,7 +463,7 @@ void test_vloxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2(
@@ -480,7 +480,7 @@ void test_vloxseg4ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4(
@@ -497,7 +497,7 @@ void test_vloxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2(
@@ -514,7 +514,7 @@ void test_vloxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1(
@@ -531,7 +531,7 @@ void test_vloxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2(
@@ -548,7 +548,7 @@ void test_vloxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2(
@@ -565,7 +565,7 @@ void test_vloxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1(
@@ -582,7 +582,7 @@ void test_vloxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2(
@@ -599,7 +599,7 @@ void test_vloxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1(
@@ -616,7 +616,7 @@ void test_vloxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2(
@@ -633,7 +633,7 @@ void test_vloxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_m(
@@ -650,7 +650,7 @@ void test_vloxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_m(
@@ -667,7 +667,7 @@ void test_vloxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m(
@@ -684,7 +684,7 @@ void test_vloxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_m(
@@ -701,7 +701,7 @@ void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m(
@@ -718,7 +718,7 @@ void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_m(
@@ -735,7 +735,7 @@ void test_vloxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_m(
@@ -752,7 +752,7 @@ void test_vloxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_m(
@@ -769,7 +769,7 @@ void test_vloxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_m(
@@ -786,7 +786,7 @@ void test_vloxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m(
@@ -803,7 +803,7 @@ void test_vloxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m(
@@ -820,7 +820,7 @@ void test_vloxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m(
@@ -837,7 +837,7 @@ void test_vloxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_m(
@@ -854,7 +854,7 @@ void test_vloxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_m(
@@ -871,7 +871,7 @@ void test_vloxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m(
@@ -888,7 +888,7 @@ void test_vloxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m(
@@ -905,7 +905,7 @@ void test_vloxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_m(
@@ -922,7 +922,7 @@ void test_vloxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_m(
@@ -939,7 +939,7 @@ void test_vloxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m(
@@ -956,7 +956,7 @@ void test_vloxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_m(
@@ -973,7 +973,7 @@ void test_vloxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_m(
@@ -990,7 +990,7 @@ void test_vloxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_m(
@@ -1007,7 +1007,7 @@ void test_vloxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_m(
@@ -1024,7 +1024,7 @@ void test_vloxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m(
@@ -1041,7 +1041,7 @@ void test_vloxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m(
@@ -1058,7 +1058,7 @@ void test_vloxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m(
@@ -1075,7 +1075,7 @@ void test_vloxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_m(
@@ -1092,7 +1092,7 @@ void test_vloxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_m(
@@ -1109,7 +1109,7 @@ void test_vloxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m(
@@ -1126,7 +1126,7 @@ void test_vloxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m(
@@ -1143,7 +1143,7 @@ void test_vloxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m(
@@ -1160,7 +1160,7 @@ void test_vloxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_m(
@@ -1177,7 +1177,7 @@ void test_vloxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m(
@@ -1194,7 +1194,7 @@ void test_vloxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_m(
@@ -1211,7 +1211,7 @@ void test_vloxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_m(
@@ -1228,7 +1228,7 @@ void test_vloxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_m(
@@ -1245,7 +1245,7 @@ void test_vloxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_m(
@@ -1262,6 +1262,6 @@ void test_vloxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c
index 94b01a0daadd..79bb7ceb4ece 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2(
@@ -38,7 +38,7 @@ void test_vloxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1(
@@ -55,7 +55,7 @@ void test_vloxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2(
@@ -72,7 +72,7 @@ void test_vloxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2(
@@ -89,7 +89,7 @@ void test_vloxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1(
@@ -106,7 +106,7 @@ void test_vloxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2(
@@ -123,7 +123,7 @@ void test_vloxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1(
@@ -140,7 +140,7 @@ void test_vloxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2(
@@ -157,7 +157,7 @@ void test_vloxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8(
@@ -174,7 +174,7 @@ void test_vloxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4(
@@ -191,7 +191,7 @@ void test_vloxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2(
@@ -208,7 +208,7 @@ void test_vloxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1(
@@ -225,7 +225,7 @@ void test_vloxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2(
@@ -242,7 +242,7 @@ void test_vloxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4(
@@ -259,7 +259,7 @@ void test_vloxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2(
@@ -276,7 +276,7 @@ void test_vloxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1(
@@ -293,7 +293,7 @@ void test_vloxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2(
@@ -310,7 +310,7 @@ void test_vloxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2(
@@ -327,7 +327,7 @@ void test_vloxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1(
@@ -344,7 +344,7 @@ void test_vloxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2(
@@ -361,7 +361,7 @@ void test_vloxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1(
@@ -378,7 +378,7 @@ void test_vloxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2(
@@ -395,7 +395,7 @@ void test_vloxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8(
@@ -412,7 +412,7 @@ void test_vloxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4(
@@ -429,7 +429,7 @@ void test_vloxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2(
@@ -446,7 +446,7 @@ void test_vloxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1(
@@ -463,7 +463,7 @@ void test_vloxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2(
@@ -480,7 +480,7 @@ void test_vloxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4(
@@ -497,7 +497,7 @@ void test_vloxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2(
@@ -514,7 +514,7 @@ void test_vloxseg4ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1(
@@ -531,7 +531,7 @@ void test_vloxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2(
@@ -548,7 +548,7 @@ void test_vloxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2(
@@ -565,7 +565,7 @@ void test_vloxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1(
@@ -582,7 +582,7 @@ void test_vloxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2(
@@ -599,7 +599,7 @@ void test_vloxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1(
@@ -616,7 +616,7 @@ void test_vloxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2(
@@ -633,7 +633,7 @@ void test_vloxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_m(
@@ -650,7 +650,7 @@ void test_vloxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_m(
@@ -667,7 +667,7 @@ void test_vloxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_m(
@@ -684,7 +684,7 @@ void test_vloxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_m(
@@ -701,7 +701,7 @@ void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m(
@@ -718,7 +718,7 @@ void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_m(
@@ -735,7 +735,7 @@ void test_vloxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_m(
@@ -752,7 +752,7 @@ void test_vloxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_m(
@@ -769,7 +769,7 @@ void test_vloxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_m(
@@ -786,7 +786,7 @@ void test_vloxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m(
@@ -803,7 +803,7 @@ void test_vloxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m(
@@ -820,7 +820,7 @@ void test_vloxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m(
@@ -837,7 +837,7 @@ void test_vloxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m(
@@ -854,7 +854,7 @@ void test_vloxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_m(
@@ -871,7 +871,7 @@ void test_vloxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m(
@@ -888,7 +888,7 @@ void test_vloxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m(
@@ -905,7 +905,7 @@ void test_vloxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_m(
@@ -922,7 +922,7 @@ void test_vloxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_m(
@@ -939,7 +939,7 @@ void test_vloxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m(
@@ -956,7 +956,7 @@ void test_vloxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_m(
@@ -973,7 +973,7 @@ void test_vloxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_m(
@@ -990,7 +990,7 @@ void test_vloxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_m(
@@ -1007,7 +1007,7 @@ void test_vloxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_m(
@@ -1024,7 +1024,7 @@ void test_vloxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m(
@@ -1041,7 +1041,7 @@ void test_vloxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m(
@@ -1058,7 +1058,7 @@ void test_vloxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m(
@@ -1075,7 +1075,7 @@ void test_vloxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_m(
@@ -1092,7 +1092,7 @@ void test_vloxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m(
@@ -1109,7 +1109,7 @@ void test_vloxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m(
@@ -1126,7 +1126,7 @@ void test_vloxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m(
@@ -1143,7 +1143,7 @@ void test_vloxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_m(
@@ -1160,7 +1160,7 @@ void test_vloxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_m(
@@ -1177,7 +1177,7 @@ void test_vloxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m(
@@ -1194,7 +1194,7 @@ void test_vloxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_m(
@@ -1211,7 +1211,7 @@ void test_vloxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_m(
@@ -1228,7 +1228,7 @@ void test_vloxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_m(
@@ -1245,7 +1245,7 @@ void test_vloxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_m(
@@ -1262,6 +1262,6 @@ void test_vloxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c
index 32ff750eb256..b772079ff1f1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2(
@@ -38,7 +38,7 @@ void test_vloxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1(
@@ -55,7 +55,7 @@ void test_vloxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2(
@@ -72,7 +72,7 @@ void test_vloxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2(
@@ -89,7 +89,7 @@ void test_vloxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1(
@@ -106,7 +106,7 @@ void test_vloxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2(
@@ -123,7 +123,7 @@ void test_vloxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1(
@@ -140,7 +140,7 @@ void test_vloxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2(
@@ -157,7 +157,7 @@ void test_vloxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8(
@@ -174,7 +174,7 @@ void test_vloxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4(
@@ -191,7 +191,7 @@ void test_vloxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2(
@@ -208,7 +208,7 @@ void test_vloxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1(
@@ -225,7 +225,7 @@ void test_vloxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4(
@@ -242,7 +242,7 @@ void test_vloxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2(
@@ -259,7 +259,7 @@ void test_vloxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1(
@@ -276,7 +276,7 @@ void test_vloxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2(
@@ -293,7 +293,7 @@ void test_vloxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2(
@@ -310,7 +310,7 @@ void test_vloxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1(
@@ -327,7 +327,7 @@ void test_vloxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2(
@@ -344,7 +344,7 @@ void test_vloxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1(
@@ -361,7 +361,7 @@ void test_vloxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2(
@@ -378,7 +378,7 @@ void test_vloxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vloxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4(
@@ -412,7 +412,7 @@ void test_vloxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2(
@@ -429,7 +429,7 @@ void test_vloxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1(
@@ -446,7 +446,7 @@ void test_vloxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4(
@@ -463,7 +463,7 @@ void test_vloxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2(
@@ -480,7 +480,7 @@ void test_vloxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1(
@@ -497,7 +497,7 @@ void test_vloxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2(
@@ -514,7 +514,7 @@ void test_vloxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2(
@@ -531,7 +531,7 @@ void test_vloxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1(
@@ -548,7 +548,7 @@ void test_vloxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2(
@@ -565,7 +565,7 @@ void test_vloxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1(
@@ -582,7 +582,7 @@ void test_vloxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2(
@@ -599,7 +599,7 @@ void test_vloxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_m(
@@ -616,7 +616,7 @@ void test_vloxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_m(
@@ -633,7 +633,7 @@ void test_vloxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_m(
@@ -650,7 +650,7 @@ void test_vloxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m(
@@ -667,7 +667,7 @@ void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m(
@@ -684,7 +684,7 @@ void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_m(
@@ -701,7 +701,7 @@ void test_vloxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_m(
@@ -718,7 +718,7 @@ void test_vloxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_m(
@@ -735,7 +735,7 @@ void test_vloxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_m(
@@ -752,7 +752,7 @@ void test_vloxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m(
@@ -769,7 +769,7 @@ void test_vloxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vloxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m(
@@ -803,7 +803,7 @@ void test_vloxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_m(
@@ -820,7 +820,7 @@ void test_vloxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m(
@@ -837,7 +837,7 @@ void test_vloxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m(
@@ -854,7 +854,7 @@ void test_vloxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_m(
@@ -871,7 +871,7 @@ void test_vloxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_m(
@@ -888,7 +888,7 @@ void test_vloxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m(
@@ -905,7 +905,7 @@ void test_vloxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_m(
@@ -922,7 +922,7 @@ void test_vloxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m(
@@ -939,7 +939,7 @@ void test_vloxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_m(
@@ -956,7 +956,7 @@ void test_vloxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_m(
@@ -973,7 +973,7 @@ void test_vloxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m(
@@ -990,7 +990,7 @@ void test_vloxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m(
@@ -1007,7 +1007,7 @@ void test_vloxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m(
@@ -1024,7 +1024,7 @@ void test_vloxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_m(
@@ -1041,7 +1041,7 @@ void test_vloxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m(
@@ -1058,7 +1058,7 @@ void test_vloxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m(
@@ -1075,7 +1075,7 @@ void test_vloxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_m(
@@ -1092,7 +1092,7 @@ void test_vloxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_m(
@@ -1109,7 +1109,7 @@ void test_vloxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m(
@@ -1126,7 +1126,7 @@ void test_vloxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_m(
@@ -1143,7 +1143,7 @@ void test_vloxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_m(
@@ -1160,7 +1160,7 @@ void test_vloxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_m(
@@ -1177,7 +1177,7 @@ void test_vloxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_m(
@@ -1194,6 +1194,6 @@ void test_vloxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c
index 744a4e58eb51..f133c3314fa6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2(
@@ -38,7 +38,7 @@ void test_vloxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1(
@@ -55,7 +55,7 @@ void test_vloxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2(
@@ -72,7 +72,7 @@ void test_vloxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2(
@@ -89,7 +89,7 @@ void test_vloxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1(
@@ -106,7 +106,7 @@ void test_vloxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2(
@@ -123,7 +123,7 @@ void test_vloxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1(
@@ -140,7 +140,7 @@ void test_vloxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2(
@@ -157,7 +157,7 @@ void test_vloxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8(
@@ -174,7 +174,7 @@ void test_vloxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4(
@@ -191,7 +191,7 @@ void test_vloxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2(
@@ -208,7 +208,7 @@ void test_vloxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1(
@@ -225,7 +225,7 @@ void test_vloxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2(
@@ -242,7 +242,7 @@ void test_vloxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4(
@@ -259,7 +259,7 @@ void test_vloxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2(
@@ -276,7 +276,7 @@ void test_vloxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1(
@@ -293,7 +293,7 @@ void test_vloxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2(
@@ -310,7 +310,7 @@ void test_vloxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2(
@@ -327,7 +327,7 @@ void test_vloxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1(
@@ -344,7 +344,7 @@ void test_vloxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2(
@@ -361,7 +361,7 @@ void test_vloxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1(
@@ -378,7 +378,7 @@ void test_vloxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2(
@@ -395,7 +395,7 @@ void test_vloxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8(
@@ -412,7 +412,7 @@ void test_vloxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4(
@@ -429,7 +429,7 @@ void test_vloxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2(
@@ -446,7 +446,7 @@ void test_vloxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1(
@@ -463,7 +463,7 @@ void test_vloxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2(
@@ -480,7 +480,7 @@ void test_vloxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4(
@@ -497,7 +497,7 @@ void test_vloxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2(
@@ -514,7 +514,7 @@ void test_vloxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1(
@@ -531,7 +531,7 @@ void test_vloxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2(
@@ -548,7 +548,7 @@ void test_vloxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2(
@@ -565,7 +565,7 @@ void test_vloxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1(
@@ -582,7 +582,7 @@ void test_vloxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2(
@@ -599,7 +599,7 @@ void test_vloxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1(
@@ -616,7 +616,7 @@ void test_vloxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2(
@@ -633,7 +633,7 @@ void test_vloxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_m(
@@ -650,7 +650,7 @@ void test_vloxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_m(
@@ -667,7 +667,7 @@ void test_vloxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_m(
@@ -684,7 +684,7 @@ void test_vloxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_m(
@@ -701,7 +701,7 @@ void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m(
@@ -718,7 +718,7 @@ void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_m(
@@ -735,7 +735,7 @@ void test_vloxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_m(
@@ -752,7 +752,7 @@ void test_vloxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_m(
@@ -769,7 +769,7 @@ void test_vloxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_m(
@@ -786,7 +786,7 @@ void test_vloxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m(
@@ -803,7 +803,7 @@ void test_vloxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m(
@@ -820,7 +820,7 @@ void test_vloxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m(
@@ -837,7 +837,7 @@ void test_vloxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_m(
@@ -854,7 +854,7 @@ void test_vloxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_m(
@@ -871,7 +871,7 @@ void test_vloxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m(
@@ -888,7 +888,7 @@ void test_vloxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m(
@@ -905,7 +905,7 @@ void test_vloxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_m(
@@ -922,7 +922,7 @@ void test_vloxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_m(
@@ -939,7 +939,7 @@ void test_vloxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m(
@@ -956,7 +956,7 @@ void test_vloxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_m(
@@ -973,7 +973,7 @@ void test_vloxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_m(
@@ -990,7 +990,7 @@ void test_vloxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m(
@@ -1007,7 +1007,7 @@ void test_vloxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_m(
@@ -1024,7 +1024,7 @@ void test_vloxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m(
@@ -1041,7 +1041,7 @@ void test_vloxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m(
@@ -1058,7 +1058,7 @@ void test_vloxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m(
@@ -1075,7 +1075,7 @@ void test_vloxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_m(
@@ -1092,7 +1092,7 @@ void test_vloxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_m(
@@ -1109,7 +1109,7 @@ void test_vloxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m(
@@ -1126,7 +1126,7 @@ void test_vloxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m(
@@ -1143,7 +1143,7 @@ void test_vloxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_m(
@@ -1160,7 +1160,7 @@ void test_vloxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m(
@@ -1177,7 +1177,7 @@ void test_vloxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m(
@@ -1194,7 +1194,7 @@ void test_vloxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_m(
@@ -1211,7 +1211,7 @@ void test_vloxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_m(
@@ -1228,7 +1228,7 @@ void test_vloxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_m(
@@ -1245,7 +1245,7 @@ void test_vloxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_m(
@@ -1262,6 +1262,6 @@ void test_vloxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c
index 8fcc13d1bbba..1d0f6662c926 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2(
@@ -42,7 +42,7 @@ void test_vloxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1(
@@ -61,7 +61,7 @@ void test_vloxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2(
@@ -80,7 +80,7 @@ void test_vloxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1(
@@ -99,7 +99,7 @@ void test_vloxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1(
@@ -118,7 +118,7 @@ void test_vloxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8(
@@ -137,7 +137,7 @@ void test_vloxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4(
@@ -156,7 +156,7 @@ void test_vloxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2(
@@ -175,7 +175,7 @@ void test_vloxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1(
@@ -194,7 +194,7 @@ void test_vloxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4(
@@ -213,7 +213,7 @@ void test_vloxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2(
@@ -232,7 +232,7 @@ void test_vloxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1(
@@ -251,7 +251,7 @@ void test_vloxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2(
@@ -270,7 +270,7 @@ void test_vloxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1(
@@ -289,7 +289,7 @@ void test_vloxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1(
@@ -308,7 +308,7 @@ void test_vloxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8(
@@ -327,7 +327,7 @@ void test_vloxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vloxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2(
@@ -365,7 +365,7 @@ void test_vloxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1(
@@ -384,7 +384,7 @@ void test_vloxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4(
@@ -403,7 +403,7 @@ void test_vloxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2(
@@ -422,7 +422,7 @@ void test_vloxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1(
@@ -441,7 +441,7 @@ void test_vloxseg5ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2(
@@ -460,7 +460,7 @@ void test_vloxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1(
@@ -479,7 +479,7 @@ void test_vloxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1(
@@ -498,7 +498,7 @@ void test_vloxseg5ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_m(
@@ -517,7 +517,7 @@ void test_vloxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_m(
@@ -536,7 +536,7 @@ void test_vloxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_m(
@@ -555,7 +555,7 @@ void test_vloxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m(
@@ -574,7 +574,7 @@ void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_m(
@@ -593,7 +593,7 @@ void test_vloxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_m(
@@ -612,7 +612,7 @@ void test_vloxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m(
@@ -631,7 +631,7 @@ void test_vloxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m(
@@ -650,7 +650,7 @@ void test_vloxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m(
@@ -669,7 +669,7 @@ void test_vloxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vloxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m(
@@ -707,7 +707,7 @@ void test_vloxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m(
@@ -726,7 +726,7 @@ void test_vloxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_m(
@@ -745,7 +745,7 @@ void test_vloxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m(
@@ -764,7 +764,7 @@ void test_vloxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_m(
@@ -783,7 +783,7 @@ void test_vloxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_m(
@@ -802,7 +802,7 @@ void test_vloxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m(
@@ -821,7 +821,7 @@ void test_vloxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m(
@@ -840,7 +840,7 @@ void test_vloxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m(
@@ -859,7 +859,7 @@ void test_vloxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_m(
@@ -878,7 +878,7 @@ void test_vloxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m(
@@ -897,7 +897,7 @@ void test_vloxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m(
@@ -916,7 +916,7 @@ void test_vloxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_m(
@@ -935,7 +935,7 @@ void test_vloxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m(
@@ -954,7 +954,7 @@ void test_vloxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_m(
@@ -973,7 +973,7 @@ void test_vloxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_m(
@@ -992,6 +992,6 @@ void test_vloxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
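// ---------------------------------------------------------------------------
// For context, a minimal sketch of calling the renamed overloaded intrinsic
// outside these autogenerated tests. This helper is hypothetical and not part
// of the patch; it assumes clang with -march=rv64gcv_zvfh and the same
// intrinsics version as the tests above (pointer-output segment loads).
#include <riscv_vector.h>
#include <stddef.h>

void gather_field0(_Float16 *dst, const _Float16 *base, vuint16mf4_t bindex,
                   size_t avl) {
  size_t vl = __riscv_vsetvl_e16mf4(avl); // vsetvl is also __riscv_-prefixed
  vfloat16mf4_t v0, v1, v2, v3, v4;
  // Ordered indexed load of five fields per element; the overload is resolved
  // from the operand types, so no type suffix is spelled out at the call site.
  __riscv_vloxseg5ei16(&v0, &v1, &v2, &v3, &v4, base, bindex, vl);
  __riscv_vse16(dst, v0, vl); // store field 0 back, for illustration
}
// ---------------------------------------------------------------------------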
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c
index 2de29b38dd81..f81246d1a009 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2(
@@ -42,7 +42,7 @@ void test_vloxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1(
@@ -61,7 +61,7 @@ void test_vloxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2(
@@ -80,7 +80,7 @@ void test_vloxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1(
@@ -99,7 +99,7 @@ void test_vloxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1(
@@ -118,7 +118,7 @@ void test_vloxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8(
@@ -137,7 +137,7 @@ void test_vloxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4(
@@ -156,7 +156,7 @@ void test_vloxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2(
@@ -175,7 +175,7 @@ void test_vloxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1(
@@ -194,7 +194,7 @@ void test_vloxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4(
@@ -213,7 +213,7 @@ void test_vloxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2(
@@ -232,7 +232,7 @@ void test_vloxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1(
@@ -251,7 +251,7 @@ void test_vloxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2(
@@ -270,7 +270,7 @@ void test_vloxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1(
@@ -289,7 +289,7 @@ void test_vloxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1(
@@ -308,7 +308,7 @@ void test_vloxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8(
@@ -327,7 +327,7 @@ void test_vloxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vloxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2(
@@ -365,7 +365,7 @@ void test_vloxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1(
@@ -384,7 +384,7 @@ void test_vloxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4(
@@ -403,7 +403,7 @@ void test_vloxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2(
@@ -422,7 +422,7 @@ void test_vloxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1(
@@ -441,7 +441,7 @@ void test_vloxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2(
@@ -460,7 +460,7 @@ void test_vloxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1(
@@ -479,7 +479,7 @@ void test_vloxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1(
@@ -498,7 +498,7 @@ void test_vloxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_m(
@@ -517,7 +517,7 @@ void test_vloxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_m(
@@ -536,7 +536,7 @@ void test_vloxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_m(
@@ -555,7 +555,7 @@ void test_vloxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m(
@@ -574,7 +574,7 @@ void test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_m(
@@ -593,7 +593,7 @@ void test_vloxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_m(
@@ -612,7 +612,7 @@ void test_vloxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m(
@@ -631,7 +631,7 @@ void test_vloxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m(
@@ -650,7 +650,7 @@ void test_vloxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m(
@@ -669,7 +669,7 @@ void test_vloxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vloxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m(
@@ -707,7 +707,7 @@ void test_vloxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m(
@@ -726,7 +726,7 @@ void test_vloxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_m(
@@ -745,7 +745,7 @@ void test_vloxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m(
@@ -764,7 +764,7 @@ void test_vloxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_m(
@@ -783,7 +783,7 @@ void test_vloxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_m(
@@ -802,7 +802,7 @@ void test_vloxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m(
@@ -821,7 +821,7 @@ void test_vloxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m(
@@ -840,7 +840,7 @@ void test_vloxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m(
@@ -859,7 +859,7 @@ void test_vloxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_m(
@@ -878,7 +878,7 @@ void test_vloxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m(
@@ -897,7 +897,7 @@ void test_vloxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m(
@@ -916,7 +916,7 @@ void test_vloxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_m(
@@ -935,7 +935,7 @@ void test_vloxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m(
@@ -954,7 +954,7 @@ void test_vloxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_m(
@@ -973,7 +973,7 @@ void test_vloxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_m(
@@ -992,6 +992,6 @@ void test_vloxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
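// ---------------------------------------------------------------------------
// A companion sketch for the masked overload exercised by the _m tests above.
// Hypothetical helper, not part of the patch; same assumptions as the
// previous example (rv64gcv target, pointer-output segment loads).
#include <riscv_vector.h>
#include <stddef.h>

void gather_seg5_masked(int32_t *dst, const int32_t *base, vuint32mf2_t bindex,
                        vbool64_t mask, size_t avl) {
  size_t vl = __riscv_vsetvl_e32mf2(avl);
  vint32mf2_t v0, v1, v2, v3, v4;
  // In the masked overload the mask follows the output pointers and precedes
  // the base pointer, exactly as in the _m test bodies above.
  __riscv_vloxseg5ei32(&v0, &v1, &v2, &v3, &v4, mask, base, bindex, vl);
  __riscv_vse32(dst, v0, vl); // store field 0 (all lanes, for simplicity)
}
// ---------------------------------------------------------------------------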
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c
index 1735ce73e9cf..095e72301ca0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2(
@@ -42,7 +42,7 @@ void test_vloxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1(
@@ -61,7 +61,7 @@ void test_vloxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2(
@@ -80,7 +80,7 @@ void test_vloxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1(
@@ -99,7 +99,7 @@ void test_vloxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1(
@@ -118,7 +118,7 @@ void test_vloxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8(
@@ -137,7 +137,7 @@ void test_vloxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4(
@@ -156,7 +156,7 @@ void test_vloxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2(
@@ -175,7 +175,7 @@ void test_vloxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1(
@@ -194,7 +194,7 @@ void test_vloxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4(
@@ -213,7 +213,7 @@ void test_vloxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2(
@@ -232,7 +232,7 @@ void test_vloxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1(
@@ -251,7 +251,7 @@ void test_vloxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2(
@@ -270,7 +270,7 @@ void test_vloxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1(
@@ -289,7 +289,7 @@ void test_vloxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1(
@@ -308,7 +308,7 @@ void test_vloxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8(
@@ -327,7 +327,7 @@ void test_vloxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vloxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2(
@@ -365,7 +365,7 @@ void test_vloxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1(
@@ -384,7 +384,7 @@ void test_vloxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4(
@@ -403,7 +403,7 @@ void test_vloxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2(
@@ -422,7 +422,7 @@ void test_vloxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1(
@@ -441,7 +441,7 @@ void test_vloxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2(
@@ -460,7 +460,7 @@ void test_vloxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1(
@@ -479,7 +479,7 @@ void test_vloxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1(
@@ -498,7 +498,7 @@ void test_vloxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_m(
@@ -517,7 +517,7 @@ void test_vloxseg5ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_m(
@@ -536,7 +536,7 @@ void test_vloxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m(
@@ -555,7 +555,7 @@ void test_vloxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m(
@@ -574,7 +574,7 @@ void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_m(
@@ -593,7 +593,7 @@ void test_vloxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_m(
@@ -612,7 +612,7 @@ void test_vloxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m(
@@ -631,7 +631,7 @@ void test_vloxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m(
@@ -650,7 +650,7 @@ void test_vloxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m(
@@ -669,7 +669,7 @@ void test_vloxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vloxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m(
@@ -707,7 +707,7 @@ void test_vloxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m(
@@ -726,7 +726,7 @@ void test_vloxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_m(
@@ -745,7 +745,7 @@ void test_vloxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m(
@@ -764,7 +764,7 @@ void test_vloxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_m(
@@ -783,7 +783,7 @@ void test_vloxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_m(
@@ -802,7 +802,7 @@ void test_vloxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m(
@@ -821,7 +821,7 @@ void test_vloxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m(
@@ -840,7 +840,7 @@ void test_vloxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m(
@@ -859,7 +859,7 @@ void test_vloxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_m(
@@ -878,7 +878,7 @@ void test_vloxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m(
@@ -897,7 +897,7 @@ void test_vloxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m(
@@ -916,7 +916,7 @@ void test_vloxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_m(
@@ -935,7 +935,7 @@ void test_vloxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m(
@@ -954,7 +954,7 @@ void test_vloxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_m(
@@ -973,7 +973,7 @@ void test_vloxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_m(
@@ -992,6 +992,6 @@ void test_vloxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
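A minimal call-site sketch of what every hunk above does (not part of the patch itself; the function name `demo_rename` is hypothetical, and building it assumes a clang with this patch applied and the V extension enabled): the overloaded segment-load intrinsic merely gains the `__riscv_` prefix, with the argument list left unchanged.

#include <riscv_vector.h>

/* Loads 5 segments of int64 elements, gathered via a 64-bit index vector,
   into the destination registers v0..v4. */
void demo_rename(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
                 vint64m1_t *v3, vint64m1_t *v4, const int64_t *base,
                 vuint64m1_t bindex, size_t vl) {
  /* Before this patch: vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); */
  __riscv_vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}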
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c
index fed6154ff3d7..5a90bfebd6d8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2(
@@ -42,7 +42,7 @@ void test_vloxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1(
@@ -61,7 +61,7 @@ void test_vloxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2(
@@ -80,7 +80,7 @@ void test_vloxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1(
@@ -99,7 +99,7 @@ void test_vloxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1(
@@ -118,7 +118,7 @@ void test_vloxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8(
@@ -137,7 +137,7 @@ void test_vloxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4(
@@ -156,7 +156,7 @@ void test_vloxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2(
@@ -175,7 +175,7 @@ void test_vloxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1(
@@ -194,7 +194,7 @@ void test_vloxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4(
@@ -213,7 +213,7 @@ void test_vloxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2(
@@ -232,7 +232,7 @@ void test_vloxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1(
@@ -251,7 +251,7 @@ void test_vloxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2(
@@ -270,7 +270,7 @@ void test_vloxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1(
@@ -289,7 +289,7 @@ void test_vloxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1(
@@ -308,7 +308,7 @@ void test_vloxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8(
@@ -327,7 +327,7 @@ void test_vloxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vloxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2(
@@ -365,7 +365,7 @@ void test_vloxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1(
@@ -384,7 +384,7 @@ void test_vloxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4(
@@ -403,7 +403,7 @@ void test_vloxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2(
@@ -422,7 +422,7 @@ void test_vloxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1(
@@ -441,7 +441,7 @@ void test_vloxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2(
@@ -460,7 +460,7 @@ void test_vloxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1(
@@ -479,7 +479,7 @@ void test_vloxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1(
@@ -498,7 +498,7 @@ void test_vloxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_m(
@@ -517,7 +517,7 @@ void test_vloxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_m(
@@ -536,7 +536,7 @@ void test_vloxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_m(
@@ -555,7 +555,7 @@ void test_vloxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m(
@@ -574,7 +574,7 @@ void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_m(
@@ -593,7 +593,7 @@ void test_vloxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_m(
@@ -612,7 +612,7 @@ void test_vloxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m(
@@ -631,7 +631,7 @@ void test_vloxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m(
@@ -650,7 +650,7 @@ void test_vloxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m(
@@ -669,7 +669,7 @@ void test_vloxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vloxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m(
@@ -707,7 +707,7 @@ void test_vloxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m(
@@ -726,7 +726,7 @@ void test_vloxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_m(
@@ -745,7 +745,7 @@ void test_vloxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m(
@@ -764,7 +764,7 @@ void test_vloxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_m(
@@ -783,7 +783,7 @@ void test_vloxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m(
@@ -802,7 +802,7 @@ void test_vloxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m(
@@ -821,7 +821,7 @@ void test_vloxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m(
@@ -840,7 +840,7 @@ void test_vloxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m(
@@ -859,7 +859,7 @@ void test_vloxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_m(
@@ -878,7 +878,7 @@ void test_vloxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m(
@@ -897,7 +897,7 @@ void test_vloxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m(
@@ -916,7 +916,7 @@ void test_vloxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_m(
@@ -935,7 +935,7 @@ void test_vloxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m(
@@ -954,7 +954,7 @@ void test_vloxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_m(
@@ -973,7 +973,7 @@ void test_vloxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_m(
@@ -992,6 +992,6 @@ void test_vloxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
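The masked variants above follow the same rename; in the overloaded non-policy API the mask is passed between the destination pointers and the base address. A minimal sketch (the caller `demo_masked` is hypothetical; same build assumptions as the sketch above):

#include <riscv_vector.h>

/* Masked 5-segment load of int8 elements with an 8-bit index vector;
   only elements selected by `mask` are loaded. */
void demo_masked(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
                 vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask,
                 const int8_t *base, vuint8mf8_t bindex, size_t vl) {
  /* Before: vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); */
  __riscv_vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}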
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c
index a42a567962a8..8dd72cf843ea 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2(
@@ -46,7 +46,7 @@ void test_vloxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1(
@@ -67,7 +67,7 @@ void test_vloxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2(
@@ -88,7 +88,7 @@ void test_vloxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1(
@@ -109,7 +109,7 @@ void test_vloxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1(
@@ -130,7 +130,7 @@ void test_vloxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8(
@@ -151,7 +151,7 @@ void test_vloxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4(
@@ -172,7 +172,7 @@ void test_vloxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2(
@@ -193,7 +193,7 @@ void test_vloxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1(
@@ -214,7 +214,7 @@ void test_vloxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4(
@@ -235,7 +235,7 @@ void test_vloxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2(
@@ -256,7 +256,7 @@ void test_vloxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1(
@@ -277,7 +277,7 @@ void test_vloxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2(
@@ -298,7 +298,7 @@ void test_vloxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1(
@@ -319,7 +319,7 @@ void test_vloxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1(
@@ -340,7 +340,7 @@ void test_vloxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8(
@@ -361,7 +361,7 @@ void test_vloxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4(
@@ -382,7 +382,7 @@ void test_vloxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2(
@@ -403,7 +403,7 @@ void test_vloxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1(
@@ -424,7 +424,7 @@ void test_vloxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4(
@@ -445,7 +445,7 @@ void test_vloxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2(
@@ -466,7 +466,7 @@ void test_vloxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1(
@@ -487,7 +487,7 @@ void test_vloxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2(
@@ -508,7 +508,7 @@ void test_vloxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1(
@@ -529,7 +529,7 @@ void test_vloxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1(
@@ -550,7 +550,7 @@ void test_vloxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_m(
@@ -571,7 +571,7 @@ void test_vloxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_m(
@@ -592,7 +592,7 @@ void test_vloxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_m(
@@ -613,7 +613,7 @@ void test_vloxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_m(
@@ -655,7 +655,7 @@ void test_vloxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_m(
@@ -676,7 +676,7 @@ void test_vloxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m(
@@ -697,7 +697,7 @@ void test_vloxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m(
@@ -718,7 +718,7 @@ void test_vloxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vloxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_m(
@@ -760,7 +760,7 @@ void test_vloxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m(
@@ -781,7 +781,7 @@ void test_vloxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m(
@@ -802,7 +802,7 @@ void test_vloxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_m(
@@ -823,7 +823,7 @@ void test_vloxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vloxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_m(
@@ -865,7 +865,7 @@ void test_vloxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_m(
@@ -886,7 +886,7 @@ void test_vloxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m(
@@ -907,7 +907,7 @@ void test_vloxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m(
@@ -928,7 +928,7 @@ void test_vloxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vloxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_m(
@@ -970,7 +970,7 @@ void test_vloxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m(
@@ -991,7 +991,7 @@ void test_vloxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m(
@@ -1012,7 +1012,7 @@ void test_vloxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_m(
@@ -1033,7 +1033,7 @@ void test_vloxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_m(
@@ -1075,7 +1075,7 @@ void test_vloxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_m(
@@ -1096,6 +1096,6 @@ void test_vloxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c
index 181b600fb1ff..e844de3e2188 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2(
@@ -46,7 +46,7 @@ void test_vloxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1(
@@ -67,7 +67,7 @@ void test_vloxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2(
@@ -88,7 +88,7 @@ void test_vloxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1(
@@ -109,7 +109,7 @@ void test_vloxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1(
@@ -130,7 +130,7 @@ void test_vloxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8(
@@ -151,7 +151,7 @@ void test_vloxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4(
@@ -172,7 +172,7 @@ void test_vloxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2(
@@ -193,7 +193,7 @@ void test_vloxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1(
@@ -214,7 +214,7 @@ void test_vloxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4(
@@ -235,7 +235,7 @@ void test_vloxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2(
@@ -256,7 +256,7 @@ void test_vloxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1(
@@ -277,7 +277,7 @@ void test_vloxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2(
@@ -298,7 +298,7 @@ void test_vloxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1(
@@ -319,7 +319,7 @@ void test_vloxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1(
@@ -340,7 +340,7 @@ void test_vloxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8(
@@ -361,7 +361,7 @@ void test_vloxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4(
@@ -382,7 +382,7 @@ void test_vloxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2(
@@ -403,7 +403,7 @@ void test_vloxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1(
@@ -424,7 +424,7 @@ void test_vloxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4(
@@ -445,7 +445,7 @@ void test_vloxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2(
@@ -466,7 +466,7 @@ void test_vloxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1(
@@ -487,7 +487,7 @@ void test_vloxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2(
@@ -508,7 +508,7 @@ void test_vloxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1(
@@ -529,7 +529,7 @@ void test_vloxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1(
@@ -550,7 +550,7 @@ void test_vloxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_m(
@@ -571,7 +571,7 @@ void test_vloxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_m(
@@ -592,7 +592,7 @@ void test_vloxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_m(
@@ -613,7 +613,7 @@ void test_vloxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_m(
@@ -655,7 +655,7 @@ void test_vloxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_m(
@@ -676,7 +676,7 @@ void test_vloxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m(
@@ -697,7 +697,7 @@ void test_vloxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m(
@@ -718,7 +718,7 @@ void test_vloxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vloxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m(
@@ -760,7 +760,7 @@ void test_vloxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m(
@@ -781,7 +781,7 @@ void test_vloxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m(
@@ -802,7 +802,7 @@ void test_vloxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_m(
@@ -823,7 +823,7 @@ void test_vloxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vloxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_m(
@@ -865,7 +865,7 @@ void test_vloxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_m(
@@ -886,7 +886,7 @@ void test_vloxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m(
@@ -907,7 +907,7 @@ void test_vloxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m(
@@ -928,7 +928,7 @@ void test_vloxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vloxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m(
@@ -970,7 +970,7 @@ void test_vloxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m(
@@ -991,7 +991,7 @@ void test_vloxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m(
@@ -1012,7 +1012,7 @@ void test_vloxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_m(
@@ -1033,7 +1033,7 @@ void test_vloxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_m(
@@ -1075,7 +1075,7 @@ void test_vloxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_m(
@@ -1096,6 +1096,6 @@ void test_vloxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c
index 9160aa9a2a0a..f8b83b28b844 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2(
@@ -46,7 +46,7 @@ void test_vloxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1(
@@ -67,7 +67,7 @@ void test_vloxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2(
@@ -88,7 +88,7 @@ void test_vloxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1(
@@ -109,7 +109,7 @@ void test_vloxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1(
@@ -130,7 +130,7 @@ void test_vloxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8(
@@ -151,7 +151,7 @@ void test_vloxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4(
@@ -172,7 +172,7 @@ void test_vloxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2(
@@ -193,7 +193,7 @@ void test_vloxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1(
@@ -214,7 +214,7 @@ void test_vloxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4(
@@ -235,7 +235,7 @@ void test_vloxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2(
@@ -256,7 +256,7 @@ void test_vloxseg6ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1(
@@ -277,7 +277,7 @@ void test_vloxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2(
@@ -298,7 +298,7 @@ void test_vloxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1(
@@ -319,7 +319,7 @@ void test_vloxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1(
@@ -340,7 +340,7 @@ void test_vloxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8(
@@ -361,7 +361,7 @@ void test_vloxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4(
@@ -382,7 +382,7 @@ void test_vloxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2(
@@ -403,7 +403,7 @@ void test_vloxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1(
@@ -424,7 +424,7 @@ void test_vloxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4(
@@ -445,7 +445,7 @@ void test_vloxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2(
@@ -466,7 +466,7 @@ void test_vloxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1(
@@ -487,7 +487,7 @@ void test_vloxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2(
@@ -508,7 +508,7 @@ void test_vloxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1(
@@ -529,7 +529,7 @@ void test_vloxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1(
@@ -550,7 +550,7 @@ void test_vloxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_m(
@@ -571,7 +571,7 @@ void test_vloxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_m(
@@ -592,7 +592,7 @@ void test_vloxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m(
@@ -613,7 +613,7 @@ void test_vloxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_m(
@@ -655,7 +655,7 @@ void test_vloxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_m(
@@ -676,7 +676,7 @@ void test_vloxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m(
@@ -697,7 +697,7 @@ void test_vloxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m(
@@ -718,7 +718,7 @@ void test_vloxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vloxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_m(
@@ -760,7 +760,7 @@ void test_vloxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m(
@@ -781,7 +781,7 @@ void test_vloxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m(
@@ -802,7 +802,7 @@ void test_vloxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_m(
@@ -823,7 +823,7 @@ void test_vloxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vloxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_m(
@@ -865,7 +865,7 @@ void test_vloxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_m(
@@ -886,7 +886,7 @@ void test_vloxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m(
@@ -907,7 +907,7 @@ void test_vloxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m(
@@ -928,7 +928,7 @@ void test_vloxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vloxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_m(
@@ -970,7 +970,7 @@ void test_vloxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m(
@@ -991,7 +991,7 @@ void test_vloxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m(
@@ -1012,7 +1012,7 @@ void test_vloxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_m(
@@ -1033,7 +1033,7 @@ void test_vloxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_m(
@@ -1075,7 +1075,7 @@ void test_vloxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_m(
@@ -1096,6 +1096,6 @@ void test_vloxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c
index c376428cd298..eeff9e8ff876 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2(
@@ -46,7 +46,7 @@ void test_vloxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1(
@@ -67,7 +67,7 @@ void test_vloxseg6ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2(
@@ -88,7 +88,7 @@ void test_vloxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1(
@@ -109,7 +109,7 @@ void test_vloxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1(
@@ -130,7 +130,7 @@ void test_vloxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8(
@@ -151,7 +151,7 @@ void test_vloxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4(
@@ -172,7 +172,7 @@ void test_vloxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2(
@@ -193,7 +193,7 @@ void test_vloxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1(
@@ -214,7 +214,7 @@ void test_vloxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4(
@@ -235,7 +235,7 @@ void test_vloxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2(
@@ -256,7 +256,7 @@ void test_vloxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1(
@@ -277,7 +277,7 @@ void test_vloxseg6ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2(
@@ -298,7 +298,7 @@ void test_vloxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1(
@@ -319,7 +319,7 @@ void test_vloxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1(
@@ -340,7 +340,7 @@ void test_vloxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8(
@@ -361,7 +361,7 @@ void test_vloxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4(
@@ -382,7 +382,7 @@ void test_vloxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2(
@@ -403,7 +403,7 @@ void test_vloxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1(
@@ -424,7 +424,7 @@ void test_vloxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4(
@@ -445,7 +445,7 @@ void test_vloxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2(
@@ -466,7 +466,7 @@ void test_vloxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1(
@@ -487,7 +487,7 @@ void test_vloxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2(
@@ -508,7 +508,7 @@ void test_vloxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1(
@@ -529,7 +529,7 @@ void test_vloxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1(
@@ -550,7 +550,7 @@ void test_vloxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_m(
@@ -571,7 +571,7 @@ void test_vloxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_m(
@@ -592,7 +592,7 @@ void test_vloxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_m(
@@ -613,7 +613,7 @@ void test_vloxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_m(
@@ -655,7 +655,7 @@ void test_vloxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_m(
@@ -676,7 +676,7 @@ void test_vloxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m(
@@ -697,7 +697,7 @@ void test_vloxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m(
@@ -718,7 +718,7 @@ void test_vloxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vloxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_m(
@@ -760,7 +760,7 @@ void test_vloxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m(
@@ -781,7 +781,7 @@ void test_vloxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m(
@@ -802,7 +802,7 @@ void test_vloxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_m(
@@ -823,7 +823,7 @@ void test_vloxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vloxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_m(
@@ -865,7 +865,7 @@ void test_vloxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m(
@@ -886,7 +886,7 @@ void test_vloxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m(
@@ -907,7 +907,7 @@ void test_vloxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m(
@@ -928,7 +928,7 @@ void test_vloxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vloxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_m(
@@ -970,7 +970,7 @@ void test_vloxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m(
@@ -991,7 +991,7 @@ void test_vloxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m(
@@ -1012,7 +1012,7 @@ void test_vloxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_m(
@@ -1033,7 +1033,7 @@ void test_vloxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_m(
@@ -1075,7 +1075,7 @@ void test_vloxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_m(
@@ -1096,6 +1096,6 @@ void test_vloxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c
index 34064b8880d4..fa86ee8d1871 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2(
@@ -50,7 +50,7 @@ void test_vloxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1(
@@ -73,7 +73,7 @@ void test_vloxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2(
@@ -96,7 +96,7 @@ void test_vloxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1(
@@ -119,7 +119,7 @@ void test_vloxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1(
@@ -142,7 +142,7 @@ void test_vloxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8(
@@ -165,7 +165,7 @@ void test_vloxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4(
@@ -188,7 +188,7 @@ void test_vloxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2(
@@ -211,7 +211,7 @@ void test_vloxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1(
@@ -234,7 +234,7 @@ void test_vloxseg7ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4(
@@ -257,7 +257,7 @@ void test_vloxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2(
@@ -280,7 +280,7 @@ void test_vloxseg7ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1(
@@ -303,7 +303,7 @@ void test_vloxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2(
@@ -326,7 +326,7 @@ void test_vloxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1(
@@ -349,7 +349,7 @@ void test_vloxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1(
@@ -372,7 +372,7 @@ void test_vloxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vloxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4(
@@ -418,7 +418,7 @@ void test_vloxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2(
@@ -441,7 +441,7 @@ void test_vloxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1(
@@ -464,7 +464,7 @@ void test_vloxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4(
@@ -487,7 +487,7 @@ void test_vloxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2(
@@ -510,7 +510,7 @@ void test_vloxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1(
@@ -533,7 +533,7 @@ void test_vloxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2(
@@ -556,7 +556,7 @@ void test_vloxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1(
@@ -579,7 +579,7 @@ void test_vloxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1(
@@ -602,7 +602,7 @@ void test_vloxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_m(
@@ -625,7 +625,7 @@ void test_vloxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_m(
@@ -648,7 +648,7 @@ void test_vloxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_m(
@@ -671,7 +671,7 @@ void test_vloxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m(
@@ -694,7 +694,7 @@ void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_m(
@@ -717,7 +717,7 @@ void test_vloxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_m(
@@ -740,7 +740,7 @@ void test_vloxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m(
@@ -763,7 +763,7 @@ void test_vloxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vloxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m(
@@ -809,7 +809,7 @@ void test_vloxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_m(
@@ -832,7 +832,7 @@ void test_vloxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m(
@@ -855,7 +855,7 @@ void test_vloxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m(
@@ -878,7 +878,7 @@ void test_vloxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vloxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m(
@@ -924,7 +924,7 @@ void test_vloxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_m(
@@ -947,7 +947,7 @@ void test_vloxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_m(
@@ -970,7 +970,7 @@ void test_vloxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m(
@@ -993,7 +993,7 @@ void test_vloxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m(
@@ -1016,7 +1016,7 @@ void test_vloxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m(
@@ -1039,7 +1039,7 @@ void test_vloxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_m(
@@ -1062,7 +1062,7 @@ void test_vloxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m(
@@ -1085,7 +1085,7 @@ void test_vloxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m(
@@ -1108,7 +1108,7 @@ void test_vloxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_m(
@@ -1131,7 +1131,7 @@ void test_vloxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m(
@@ -1154,7 +1154,7 @@ void test_vloxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_m(
@@ -1177,7 +1177,7 @@ void test_vloxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_m(
@@ -1200,6 +1200,6 @@ void test_vloxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
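
The hunks in this file repeat one mechanical rename for every element type and LMUL combination. For orientation, here is a minimal, illustrative sketch of what calling the renamed overload looks like in user code after this patch; the helper name load7_u16 and the -march choice are assumptions for the example, while the intrinsic's signature is copied from the u16m1 test above.

#include <riscv_vector.h>

// Illustrative only, not part of the patch. Build for an RVV-enabled
// target, e.g. clang --target=riscv64 -march=rv64gcv. The overloaded
// intrinsic keeps its argument list; only the __riscv_ prefix is new.
void load7_u16(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
  vuint16m1_t v0, v1, v2, v3, v4, v5, v6;
  // Overload resolution still picks the u16m1/ei16 variant from the
  // argument types; the spelling changes from vloxseg7ei16 to
  // __riscv_vloxseg7ei16.
  __riscv_vloxseg7ei16(&v0, &v1, &v2, &v3, &v4, &v5, &v6, base, bindex, vl);
}
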
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c
index 50e1ff0edc7c..f0de76cdfd63 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2(
@@ -50,7 +50,7 @@ void test_vloxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1(
@@ -73,7 +73,7 @@ void test_vloxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2(
@@ -96,7 +96,7 @@ void test_vloxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1(
@@ -119,7 +119,7 @@ void test_vloxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1(
@@ -142,7 +142,7 @@ void test_vloxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8(
@@ -165,7 +165,7 @@ void test_vloxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4(
@@ -188,7 +188,7 @@ void test_vloxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2(
@@ -211,7 +211,7 @@ void test_vloxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1(
@@ -234,7 +234,7 @@ void test_vloxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4(
@@ -257,7 +257,7 @@ void test_vloxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2(
@@ -280,7 +280,7 @@ void test_vloxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1(
@@ -303,7 +303,7 @@ void test_vloxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2(
@@ -326,7 +326,7 @@ void test_vloxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1(
@@ -349,7 +349,7 @@ void test_vloxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1(
@@ -372,7 +372,7 @@ void test_vloxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vloxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4(
@@ -418,7 +418,7 @@ void test_vloxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2(
@@ -441,7 +441,7 @@ void test_vloxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1(
@@ -464,7 +464,7 @@ void test_vloxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4(
@@ -487,7 +487,7 @@ void test_vloxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2(
@@ -510,7 +510,7 @@ void test_vloxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1(
@@ -533,7 +533,7 @@ void test_vloxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2(
@@ -556,7 +556,7 @@ void test_vloxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1(
@@ -579,7 +579,7 @@ void test_vloxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1(
@@ -602,7 +602,7 @@ void test_vloxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_m(
@@ -625,7 +625,7 @@ void test_vloxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_m(
@@ -648,7 +648,7 @@ void test_vloxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_m(
@@ -671,7 +671,7 @@ void test_vloxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m(
@@ -694,7 +694,7 @@ void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_m(
@@ -717,7 +717,7 @@ void test_vloxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_m(
@@ -740,7 +740,7 @@ void test_vloxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m(
@@ -763,7 +763,7 @@ void test_vloxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vloxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m(
@@ -809,7 +809,7 @@ void test_vloxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_m(
@@ -832,7 +832,7 @@ void test_vloxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m(
@@ -855,7 +855,7 @@ void test_vloxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m(
@@ -878,7 +878,7 @@ void test_vloxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vloxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m(
@@ -924,7 +924,7 @@ void test_vloxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_m(
@@ -947,7 +947,7 @@ void test_vloxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_m(
@@ -970,7 +970,7 @@ void test_vloxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m(
@@ -993,7 +993,7 @@ void test_vloxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m(
@@ -1016,7 +1016,7 @@ void test_vloxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m(
@@ -1039,7 +1039,7 @@ void test_vloxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m(
@@ -1062,7 +1062,7 @@ void test_vloxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m(
@@ -1085,7 +1085,7 @@ void test_vloxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m(
@@ -1108,7 +1108,7 @@ void test_vloxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_m(
@@ -1131,7 +1131,7 @@ void test_vloxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m(
@@ -1154,7 +1154,7 @@ void test_vloxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_m(
@@ -1177,7 +1177,7 @@ void test_vloxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_m(
@@ -1200,6 +1200,6 @@ void test_vloxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
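
As the *_m tests above show, masking keeps the same overloaded spelling: inserting the mask operand before base is what selects the masked flavor, and the prefix rename does not change that resolution. A short hedged sketch, with the helper name assumed for illustration and the types taken from the f32mf2 masked test in this file:

#include <riscv_vector.h>

// Illustrative sketch, not from the patch: one __riscv_vloxseg7ei32 name
// covers both forms; the vbool64_t mask placed before base picks the
// masked overload, mirroring the *_m tests above.
void load7_f32_masked(vbool64_t mask, const float *base,
                      vuint32mf2_t bindex, size_t vl) {
  vfloat32mf2_t v0, v1, v2, v3, v4, v5, v6;
  __riscv_vloxseg7ei32(&v0, &v1, &v2, &v3, &v4, &v5, &v6,
                       mask, base, bindex, vl);
}
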
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c
index e0d2c2428e36..c4e9cddc0324 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2(
@@ -50,7 +50,7 @@ void test_vloxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1(
@@ -73,7 +73,7 @@ void test_vloxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2(
@@ -96,7 +96,7 @@ void test_vloxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1(
@@ -119,7 +119,7 @@ void test_vloxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1(
@@ -142,7 +142,7 @@ void test_vloxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8(
@@ -165,7 +165,7 @@ void test_vloxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4(
@@ -188,7 +188,7 @@ void test_vloxseg7ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2(
@@ -211,7 +211,7 @@ void test_vloxseg7ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1(
@@ -234,7 +234,7 @@ void test_vloxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4(
@@ -257,7 +257,7 @@ void test_vloxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2(
@@ -280,7 +280,7 @@ void test_vloxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1(
@@ -303,7 +303,7 @@ void test_vloxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2(
@@ -326,7 +326,7 @@ void test_vloxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1(
@@ -349,7 +349,7 @@ void test_vloxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1(
@@ -372,7 +372,7 @@ void test_vloxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vloxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4(
@@ -418,7 +418,7 @@ void test_vloxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2(
@@ -441,7 +441,7 @@ void test_vloxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1(
@@ -464,7 +464,7 @@ void test_vloxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4(
@@ -487,7 +487,7 @@ void test_vloxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2(
@@ -510,7 +510,7 @@ void test_vloxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1(
@@ -533,7 +533,7 @@ void test_vloxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2(
@@ -556,7 +556,7 @@ void test_vloxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1(
@@ -579,7 +579,7 @@ void test_vloxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1(
@@ -602,7 +602,7 @@ void test_vloxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_m(
@@ -625,7 +625,7 @@ void test_vloxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_m(
@@ -648,7 +648,7 @@ void test_vloxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m(
@@ -671,7 +671,7 @@ void test_vloxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m(
@@ -694,7 +694,7 @@ void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_m(
@@ -717,7 +717,7 @@ void test_vloxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_m(
@@ -740,7 +740,7 @@ void test_vloxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m(
@@ -763,7 +763,7 @@ void test_vloxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vloxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m(
@@ -809,7 +809,7 @@ void test_vloxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_m(
@@ -832,7 +832,7 @@ void test_vloxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m(
@@ -855,7 +855,7 @@ void test_vloxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m(
@@ -878,7 +878,7 @@ void test_vloxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vloxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m(
@@ -924,7 +924,7 @@ void test_vloxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_m(
@@ -947,7 +947,7 @@ void test_vloxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_m(
@@ -970,7 +970,7 @@ void test_vloxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m(
@@ -993,7 +993,7 @@ void test_vloxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m(
@@ -1016,7 +1016,7 @@ void test_vloxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m(
@@ -1039,7 +1039,7 @@ void test_vloxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_m(
@@ -1062,7 +1062,7 @@ void test_vloxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m(
@@ -1085,7 +1085,7 @@ void test_vloxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m(
@@ -1108,7 +1108,7 @@ void test_vloxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_m(
@@ -1131,7 +1131,7 @@ void test_vloxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m(
@@ -1154,7 +1154,7 @@ void test_vloxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_m(
@@ -1177,7 +1177,7 @@ void test_vloxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_m(
@@ -1200,6 +1200,6 @@ void test_vloxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c
index c5008b541f2b..f2e17d70b034 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2(
@@ -50,7 +50,7 @@ void test_vloxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1(
@@ -73,7 +73,7 @@ void test_vloxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2(
@@ -96,7 +96,7 @@ void test_vloxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1(
@@ -119,7 +119,7 @@ void test_vloxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1(
@@ -142,7 +142,7 @@ void test_vloxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8(
@@ -165,7 +165,7 @@ void test_vloxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4(
@@ -188,7 +188,7 @@ void test_vloxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2(
@@ -211,7 +211,7 @@ void test_vloxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1(
@@ -234,7 +234,7 @@ void test_vloxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4(
@@ -257,7 +257,7 @@ void test_vloxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2(
@@ -280,7 +280,7 @@ void test_vloxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1(
@@ -303,7 +303,7 @@ void test_vloxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2(
@@ -326,7 +326,7 @@ void test_vloxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1(
@@ -349,7 +349,7 @@ void test_vloxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1(
@@ -372,7 +372,7 @@ void test_vloxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vloxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4(
@@ -418,7 +418,7 @@ void test_vloxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2(
@@ -441,7 +441,7 @@ void test_vloxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1(
@@ -464,7 +464,7 @@ void test_vloxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4(
@@ -487,7 +487,7 @@ void test_vloxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2(
@@ -510,7 +510,7 @@ void test_vloxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1(
@@ -533,7 +533,7 @@ void test_vloxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2(
@@ -556,7 +556,7 @@ void test_vloxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1(
@@ -579,7 +579,7 @@ void test_vloxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1(
@@ -602,7 +602,7 @@ void test_vloxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_m(
@@ -625,7 +625,7 @@ void test_vloxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_m(
@@ -648,7 +648,7 @@ void test_vloxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_m(
@@ -671,7 +671,7 @@ void test_vloxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m(
@@ -694,7 +694,7 @@ void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_m(
@@ -717,7 +717,7 @@ void test_vloxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_m(
@@ -740,7 +740,7 @@ void test_vloxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m(
@@ -763,7 +763,7 @@ void test_vloxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vloxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m(
@@ -809,7 +809,7 @@ void test_vloxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_m(
@@ -832,7 +832,7 @@ void test_vloxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m(
@@ -855,7 +855,7 @@ void test_vloxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m(
@@ -878,7 +878,7 @@ void test_vloxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vloxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m(
@@ -924,7 +924,7 @@ void test_vloxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_m(
@@ -947,7 +947,7 @@ void test_vloxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m(
@@ -970,7 +970,7 @@ void test_vloxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m(
@@ -993,7 +993,7 @@ void test_vloxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m(
@@ -1016,7 +1016,7 @@ void test_vloxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m(
@@ -1039,7 +1039,7 @@ void test_vloxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_m(
@@ -1062,7 +1062,7 @@ void test_vloxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m(
@@ -1085,7 +1085,7 @@ void test_vloxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m(
@@ -1108,7 +1108,7 @@ void test_vloxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_m(
@@ -1131,7 +1131,7 @@ void test_vloxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m(
@@ -1154,7 +1154,7 @@ void test_vloxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_m(
@@ -1177,7 +1177,7 @@ void test_vloxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_m(
@@ -1200,6 +1200,6 @@ void test_vloxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c
index 04ce3e45d544..77811878eae2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2(
@@ -54,7 +54,7 @@ void test_vloxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1(
@@ -79,7 +79,7 @@ void test_vloxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2(
@@ -104,7 +104,7 @@ void test_vloxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1(
@@ -129,7 +129,7 @@ void test_vloxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1(
@@ -154,7 +154,7 @@ void test_vloxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8(
@@ -179,7 +179,7 @@ void test_vloxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4(
@@ -204,7 +204,7 @@ void test_vloxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2(
@@ -229,7 +229,7 @@ void test_vloxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1(
@@ -254,7 +254,7 @@ void test_vloxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4(
@@ -279,7 +279,7 @@ void test_vloxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2(
@@ -304,7 +304,7 @@ void test_vloxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1(
@@ -329,7 +329,7 @@ void test_vloxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2(
@@ -354,7 +354,7 @@ void test_vloxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1(
@@ -379,7 +379,7 @@ void test_vloxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1(
@@ -404,7 +404,7 @@ void test_vloxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8(
@@ -429,7 +429,7 @@ void test_vloxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4(
@@ -454,7 +454,7 @@ void test_vloxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2(
@@ -479,7 +479,7 @@ void test_vloxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1(
@@ -504,7 +504,7 @@ void test_vloxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4(
@@ -529,7 +529,7 @@ void test_vloxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2(
@@ -554,7 +554,7 @@ void test_vloxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1(
@@ -579,7 +579,7 @@ void test_vloxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2(
@@ -604,7 +604,7 @@ void test_vloxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1(
@@ -629,7 +629,7 @@ void test_vloxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1(
@@ -654,7 +654,7 @@ void test_vloxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_m(
@@ -679,7 +679,7 @@ void test_vloxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_m(
@@ -704,7 +704,7 @@ void test_vloxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_m(
@@ -729,7 +729,7 @@ void test_vloxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m(
@@ -754,7 +754,7 @@ void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_m(
@@ -779,7 +779,7 @@ void test_vloxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_m(
@@ -804,7 +804,7 @@ void test_vloxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m(
@@ -829,7 +829,7 @@ void test_vloxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m(
@@ -854,7 +854,7 @@ void test_vloxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m(
@@ -879,7 +879,7 @@ void test_vloxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_m(
@@ -904,7 +904,7 @@ void test_vloxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m(
@@ -929,7 +929,7 @@ void test_vloxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m(
@@ -954,7 +954,7 @@ void test_vloxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_m(
@@ -979,7 +979,7 @@ void test_vloxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m(
@@ -1004,7 +1004,7 @@ void test_vloxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_m(
@@ -1029,7 +1029,7 @@ void test_vloxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m(
@@ -1079,7 +1079,7 @@ void test_vloxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m(
@@ -1104,7 +1104,7 @@ void test_vloxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m(
@@ -1129,7 +1129,7 @@ void test_vloxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_m(
@@ -1154,7 +1154,7 @@ void test_vloxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m(
@@ -1179,7 +1179,7 @@ void test_vloxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m(
@@ -1204,7 +1204,7 @@ void test_vloxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_m(
@@ -1229,7 +1229,7 @@ void test_vloxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m(
@@ -1254,7 +1254,7 @@ void test_vloxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_m(
@@ -1279,7 +1279,7 @@ void test_vloxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_m(
@@ -1304,6 +1304,6 @@ void test_vloxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
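
For reference, a minimal caller of the renamed masked overload, mirroring the test_vloxseg8ei16_v_u64m1_m signature directly above; this is an illustrative sketch, not part of the patch, and assumes a vector-enabled target (e.g. clang --target=riscv64 -march=rv64gcv):

    #include <riscv_vector.h>

    void load8_u64m1_masked(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
                            vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5,
                            vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask,
                            const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
      // Overload resolution picks the u64m1/ei16 variant from the argument
      // types; only the __riscv_ prefix is new relative to the old spelling.
      __riscv_vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
    }
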
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c
index 33186b1856b0..3a731fc33624 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2(
@@ -54,7 +54,7 @@ void test_vloxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1(
@@ -79,7 +79,7 @@ void test_vloxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2(
@@ -104,7 +104,7 @@ void test_vloxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1(
@@ -129,7 +129,7 @@ void test_vloxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1(
@@ -154,7 +154,7 @@ void test_vloxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8(
@@ -179,7 +179,7 @@ void test_vloxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4(
@@ -204,7 +204,7 @@ void test_vloxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2(
@@ -229,7 +229,7 @@ void test_vloxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1(
@@ -254,7 +254,7 @@ void test_vloxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4(
@@ -279,7 +279,7 @@ void test_vloxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2(
@@ -304,7 +304,7 @@ void test_vloxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1(
@@ -329,7 +329,7 @@ void test_vloxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2(
@@ -354,7 +354,7 @@ void test_vloxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1(
@@ -379,7 +379,7 @@ void test_vloxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1(
@@ -404,7 +404,7 @@ void test_vloxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8(
@@ -429,7 +429,7 @@ void test_vloxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4(
@@ -454,7 +454,7 @@ void test_vloxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2(
@@ -479,7 +479,7 @@ void test_vloxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1(
@@ -504,7 +504,7 @@ void test_vloxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4(
@@ -529,7 +529,7 @@ void test_vloxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2(
@@ -554,7 +554,7 @@ void test_vloxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1(
@@ -579,7 +579,7 @@ void test_vloxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2(
@@ -604,7 +604,7 @@ void test_vloxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1(
@@ -629,7 +629,7 @@ void test_vloxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1(
@@ -654,7 +654,7 @@ void test_vloxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_m(
@@ -679,7 +679,7 @@ void test_vloxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_m(
@@ -704,7 +704,7 @@ void test_vloxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_m(
@@ -729,7 +729,7 @@ void test_vloxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m(
@@ -754,7 +754,7 @@ void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_m(
@@ -779,7 +779,7 @@ void test_vloxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_m(
@@ -804,7 +804,7 @@ void test_vloxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m(
@@ -829,7 +829,7 @@ void test_vloxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m(
@@ -854,7 +854,7 @@ void test_vloxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m(
@@ -879,7 +879,7 @@ void test_vloxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_m(
@@ -904,7 +904,7 @@ void test_vloxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m(
@@ -929,7 +929,7 @@ void test_vloxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m(
@@ -954,7 +954,7 @@ void test_vloxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_m(
@@ -979,7 +979,7 @@ void test_vloxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m(
@@ -1004,7 +1004,7 @@ void test_vloxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_m(
@@ -1029,7 +1029,7 @@ void test_vloxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m(
@@ -1079,7 +1079,7 @@ void test_vloxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m(
@@ -1104,7 +1104,7 @@ void test_vloxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m(
@@ -1129,7 +1129,7 @@ void test_vloxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m(
@@ -1154,7 +1154,7 @@ void test_vloxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m(
@@ -1179,7 +1179,7 @@ void test_vloxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m(
@@ -1204,7 +1204,7 @@ void test_vloxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_m(
@@ -1229,7 +1229,7 @@ void test_vloxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m(
@@ -1254,7 +1254,7 @@ void test_vloxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_m(
@@ -1279,7 +1279,7 @@ void test_vloxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_m(
@@ -1304,6 +1304,6 @@ void test_vloxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
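
For contrast with the masked sketch above, the unmasked overload drops the vbool mask argument; this sketch mirrors test_vloxseg8ei32_v_i32m1 from this file and is likewise illustrative, not part of the patch:

    #include <riscv_vector.h>

    void load8_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
                     vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5,
                     vint32m1_t *v6, vint32m1_t *v7,
                     const int32_t *base, vuint32m1_t bindex, size_t vl) {
      // Unmasked form: same destination pointers and 32-bit index vector,
      // no mask operand.
      __riscv_vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
    }
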
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c
index 58115f72fbe1..8bfb5033e004 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2(
@@ -54,7 +54,7 @@ void test_vloxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1(
@@ -79,7 +79,7 @@ void test_vloxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2(
@@ -104,7 +104,7 @@ void test_vloxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1(
@@ -129,7 +129,7 @@ void test_vloxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1(
@@ -154,7 +154,7 @@ void test_vloxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8(
@@ -179,7 +179,7 @@ void test_vloxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4(
@@ -204,7 +204,7 @@ void test_vloxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2(
@@ -229,7 +229,7 @@ void test_vloxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1(
@@ -254,7 +254,7 @@ void test_vloxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4(
@@ -279,7 +279,7 @@ void test_vloxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2(
@@ -304,7 +304,7 @@ void test_vloxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1(
@@ -329,7 +329,7 @@ void test_vloxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2(
@@ -354,7 +354,7 @@ void test_vloxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1(
@@ -379,7 +379,7 @@ void test_vloxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1(
@@ -404,7 +404,7 @@ void test_vloxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8(
@@ -429,7 +429,7 @@ void test_vloxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4(
@@ -454,7 +454,7 @@ void test_vloxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2(
@@ -479,7 +479,7 @@ void test_vloxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1(
@@ -504,7 +504,7 @@ void test_vloxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4(
@@ -529,7 +529,7 @@ void test_vloxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2(
@@ -554,7 +554,7 @@ void test_vloxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1(
@@ -579,7 +579,7 @@ void test_vloxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2(
@@ -604,7 +604,7 @@ void test_vloxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1(
@@ -629,7 +629,7 @@ void test_vloxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1(
@@ -654,7 +654,7 @@ void test_vloxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_m(
@@ -679,7 +679,7 @@ void test_vloxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_m(
@@ -704,7 +704,7 @@ void test_vloxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m(
@@ -729,7 +729,7 @@ void test_vloxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m(
@@ -754,7 +754,7 @@ void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_m(
@@ -779,7 +779,7 @@ void test_vloxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_m(
@@ -804,7 +804,7 @@ void test_vloxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m(
@@ -829,7 +829,7 @@ void test_vloxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m(
@@ -854,7 +854,7 @@ void test_vloxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m(
@@ -879,7 +879,7 @@ void test_vloxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_m(
@@ -904,7 +904,7 @@ void test_vloxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m(
@@ -929,7 +929,7 @@ void test_vloxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m(
@@ -954,7 +954,7 @@ void test_vloxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_m(
@@ -979,7 +979,7 @@ void test_vloxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m(
@@ -1004,7 +1004,7 @@ void test_vloxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_m(
@@ -1029,7 +1029,7 @@ void test_vloxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m(
@@ -1079,7 +1079,7 @@ void test_vloxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m(
@@ -1104,7 +1104,7 @@ void test_vloxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m(
@@ -1129,7 +1129,7 @@ void test_vloxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_m(
@@ -1154,7 +1154,7 @@ void test_vloxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m(
@@ -1179,7 +1179,7 @@ void test_vloxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m(
@@ -1204,7 +1204,7 @@ void test_vloxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_m(
@@ -1229,7 +1229,7 @@ void test_vloxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m(
@@ -1254,7 +1254,7 @@ void test_vloxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_m(
@@ -1279,7 +1279,7 @@ void test_vloxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_m(
@@ -1304,6 +1304,6 @@ void test_vloxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c
index 42603363ac12..5d18525351a2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2(
@@ -54,7 +54,7 @@ void test_vloxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1(
@@ -79,7 +79,7 @@ void test_vloxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2(
@@ -104,7 +104,7 @@ void test_vloxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1(
@@ -129,7 +129,7 @@ void test_vloxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1(
@@ -154,7 +154,7 @@ void test_vloxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8(
@@ -179,7 +179,7 @@ void test_vloxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4(
@@ -204,7 +204,7 @@ void test_vloxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2(
@@ -229,7 +229,7 @@ void test_vloxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1(
@@ -254,7 +254,7 @@ void test_vloxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4(
@@ -279,7 +279,7 @@ void test_vloxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2(
@@ -304,7 +304,7 @@ void test_vloxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1(
@@ -329,7 +329,7 @@ void test_vloxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2(
@@ -354,7 +354,7 @@ void test_vloxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1(
@@ -379,7 +379,7 @@ void test_vloxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1(
@@ -404,7 +404,7 @@ void test_vloxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8(
@@ -429,7 +429,7 @@ void test_vloxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4(
@@ -454,7 +454,7 @@ void test_vloxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2(
@@ -479,7 +479,7 @@ void test_vloxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1(
@@ -504,7 +504,7 @@ void test_vloxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4(
@@ -529,7 +529,7 @@ void test_vloxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2(
@@ -554,7 +554,7 @@ void test_vloxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1(
@@ -579,7 +579,7 @@ void test_vloxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2(
@@ -604,7 +604,7 @@ void test_vloxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1(
@@ -629,7 +629,7 @@ void test_vloxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1(
@@ -654,7 +654,7 @@ void test_vloxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_m(
@@ -679,7 +679,7 @@ void test_vloxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_m(
@@ -704,7 +704,7 @@ void test_vloxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_m(
@@ -729,7 +729,7 @@ void test_vloxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m(
@@ -754,7 +754,7 @@ void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_m(
@@ -779,7 +779,7 @@ void test_vloxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_m(
@@ -804,7 +804,7 @@ void test_vloxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m(
@@ -829,7 +829,7 @@ void test_vloxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m(
@@ -854,7 +854,7 @@ void test_vloxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m(
@@ -879,7 +879,7 @@ void test_vloxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_m(
@@ -904,7 +904,7 @@ void test_vloxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m(
@@ -929,7 +929,7 @@ void test_vloxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m(
@@ -954,7 +954,7 @@ void test_vloxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_m(
@@ -979,7 +979,7 @@ void test_vloxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m(
@@ -1004,7 +1004,7 @@ void test_vloxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_m(
@@ -1029,7 +1029,7 @@ void test_vloxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m(
@@ -1054,7 +1054,7 @@ void test_vloxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m(
@@ -1079,7 +1079,7 @@ void test_vloxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m(
@@ -1104,7 +1104,7 @@ void test_vloxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m(
@@ -1129,7 +1129,7 @@ void test_vloxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_m(
@@ -1154,7 +1154,7 @@ void test_vloxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m(
@@ -1179,7 +1179,7 @@ void test_vloxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m(
@@ -1204,7 +1204,7 @@ void test_vloxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_m(
@@ -1229,7 +1229,7 @@ void test_vloxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m(
@@ -1254,7 +1254,7 @@ void test_vloxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_m(
@@ -1279,7 +1279,7 @@ void test_vloxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_m(
@@ -1304,6 +1304,6 @@ void test_vloxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse16.c
index d9f952fb09eb..683c08c3277d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, ptrdi
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, ptrdi
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, const _Float16 *base, ptrdiff
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, const _Float16 *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, const _Float16 *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, const _Float16 *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m(
@@ -76,7 +76,7 @@ vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, const int16_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m(
@@ -85,7 +85,7 @@ vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, const int16_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m(
@@ -94,7 +94,7 @@ vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, const int16_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m(
@@ -103,7 +103,7 @@ vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, const int16_t *base, ptrdiff_t b
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m(
@@ -112,7 +112,7 @@ vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, const int16_t *base, ptrdiff_t b
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m(
@@ -121,7 +121,7 @@ vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, const int16_t *base, ptrdiff_t b
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m(
@@ -130,7 +130,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, ptrdif
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m(
@@ -139,7 +139,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, ptrdif
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m(
@@ -148,7 +148,7 @@ vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, const uint16_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m(
@@ -157,7 +157,7 @@ vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m(
@@ -166,6 +166,6 @@ vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, const uint16_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16(mask, base, bstride, vl);
+ return __riscv_vlse16(mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse32.c
index b170b5859daf..913938be38de 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, const float *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m(
@@ -31,7 +31,7 @@ vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, const float *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m(
@@ -40,7 +40,7 @@ vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, const float *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m(
@@ -49,7 +49,7 @@ vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, const float *base, ptrdiff_t b
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m(
@@ -58,7 +58,7 @@ vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, const float *base, ptrdiff_t b
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m(
@@ -67,7 +67,7 @@ vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, const int32_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m(
@@ -76,7 +76,7 @@ vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, const int32_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m(
@@ -85,7 +85,7 @@ vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, const int32_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m(
@@ -94,7 +94,7 @@ vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, const int32_t *base, ptrdiff_t b
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m(
@@ -103,7 +103,7 @@ vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, const int32_t *base, ptrdiff_t b
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m(
@@ -112,7 +112,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, ptrdif
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m(
@@ -121,7 +121,7 @@ vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, const uint32_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m(
@@ -130,7 +130,7 @@ vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, const uint32_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m(
@@ -139,6 +139,6 @@ vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, const uint32_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32(mask, base, bstride, vl);
+ return __riscv_vlse32(mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse64.c
index 1de0e53cbdda..28acb5aed021 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m(
@@ -22,7 +22,7 @@ vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, const double *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m(
@@ -31,7 +31,7 @@ vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, const double *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m(
@@ -40,7 +40,7 @@ vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, const double *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m(
@@ -49,7 +49,7 @@ vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, const double *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m(
@@ -58,7 +58,7 @@ vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, const int64_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m(
@@ -67,7 +67,7 @@ vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, const int64_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m(
@@ -76,7 +76,7 @@ vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, const int64_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m(
@@ -85,7 +85,7 @@ vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, const int64_t *base, ptrdiff_t b
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m(
@@ -94,7 +94,7 @@ vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, const uint64_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m(
@@ -103,7 +103,7 @@ vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, const uint64_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m(
@@ -112,6 +112,6 @@ vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, const uint64_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64(mask, base, bstride, vl);
+ return __riscv_vlse64(mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse8.c
index ad2782c0fbed..e16a3da6e934 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m(
@@ -21,7 +21,7 @@ vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m(
@@ -30,7 +30,7 @@ vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m(
@@ -39,7 +39,7 @@ vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bs
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m(
@@ -48,7 +48,7 @@ vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstri
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m(
@@ -57,7 +57,7 @@ vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstri
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m(
@@ -66,7 +66,7 @@ vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstri
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m(
@@ -75,7 +75,7 @@ vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, const int8_t *base, ptrdiff_t bstri
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m(
@@ -84,7 +84,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m(
@@ -93,7 +93,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m(
@@ -102,7 +102,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m(
@@ -111,7 +111,7 @@ vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bst
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m(
@@ -120,7 +120,7 @@ vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bst
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m(
@@ -129,6 +129,6 @@ vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bst
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8(mask, base, bstride, vl);
+ return __riscv_vlse8(mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16.c
index 96ec6de4189e..5180bb28c249 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m(
@@ -30,7 +30,7 @@ void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m(
@@ -43,7 +43,7 @@ void test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m(
@@ -56,7 +56,7 @@ void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m(
@@ -69,7 +69,7 @@ void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m(
@@ -82,7 +82,7 @@ void test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m(
@@ -95,7 +95,7 @@ void test_vlseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m(
@@ -108,7 +108,7 @@ void test_vlseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m(
@@ -121,7 +121,7 @@ void test_vlseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m(
@@ -134,7 +134,7 @@ void test_vlseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m(
@@ -147,7 +147,7 @@ void test_vlseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m(
@@ -160,7 +160,7 @@ void test_vlseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m(
@@ -173,7 +173,7 @@ void test_vlseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m(
@@ -186,7 +186,7 @@ void test_vlseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m(
@@ -199,6 +199,6 @@ void test_vlseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, size_t vl) {
- return vlseg2e16(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e16(v0, v1, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c
index afdc67b42442..62d369fa8f61 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_m(
@@ -34,7 +34,7 @@ void test_vlseg2e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_m(
@@ -49,7 +49,7 @@ void test_vlseg2e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_m(
@@ -64,7 +64,7 @@ void test_vlseg2e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_m(
@@ -79,7 +79,7 @@ void test_vlseg2e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m(
@@ -94,7 +94,7 @@ void test_vlseg2e16ff_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m(
@@ -109,7 +109,7 @@ void test_vlseg2e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m(
@@ -124,7 +124,7 @@ void test_vlseg2e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m(
@@ -139,7 +139,7 @@ void test_vlseg2e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m(
@@ -154,7 +154,7 @@ void test_vlseg2e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
@@ -169,7 +169,7 @@ void test_vlseg2e16ff_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
@@ -184,7 +184,7 @@ void test_vlseg2e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m(
@@ -199,7 +199,7 @@ void test_vlseg2e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m(
@@ -214,7 +214,7 @@ void test_vlseg2e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m(
@@ -229,6 +229,6 @@ void test_vlseg2e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e16ff(v0, v1, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32.c
index 7485fd67eb14..b4392850cd5d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m(
@@ -30,7 +30,7 @@ void test_vlseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m(
@@ -43,7 +43,7 @@ void test_vlseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m(
@@ -56,7 +56,7 @@ void test_vlseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m(
@@ -69,7 +69,7 @@ void test_vlseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m(
@@ -82,7 +82,7 @@ void test_vlseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m(
@@ -95,7 +95,7 @@ void test_vlseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m(
@@ -108,7 +108,7 @@ void test_vlseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m(
@@ -121,7 +121,7 @@ void test_vlseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m(
@@ -134,7 +134,7 @@ void test_vlseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m(
@@ -147,7 +147,7 @@ void test_vlseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m(
@@ -160,6 +160,6 @@ void test_vlseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, size_t vl) {
- return vlseg2e32(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e32(v0, v1, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c
index 60ee80b00858..93563fa8ba54 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m(
@@ -34,7 +34,7 @@ void test_vlseg2e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m(
@@ -49,7 +49,7 @@ void test_vlseg2e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m(
@@ -64,7 +64,7 @@ void test_vlseg2e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m(
@@ -79,7 +79,7 @@ void test_vlseg2e32ff_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m(
@@ -94,7 +94,7 @@ void test_vlseg2e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m(
@@ -109,7 +109,7 @@ void test_vlseg2e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m(
@@ -124,7 +124,7 @@ void test_vlseg2e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m(
@@ -139,7 +139,7 @@ void test_vlseg2e32ff_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m(
@@ -154,7 +154,7 @@ void test_vlseg2e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m(
@@ -169,7 +169,7 @@ void test_vlseg2e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m(
@@ -184,6 +184,6 @@ void test_vlseg2e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e32ff(v0, v1, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64.c
index 9dbc1e4e77eb..40734d2a227e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, size_t vl) {
- return vlseg2e64(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e64(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m(
@@ -30,7 +30,7 @@ void test_vlseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, size_t vl) {
- return vlseg2e64(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e64(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m(
@@ -43,7 +43,7 @@ void test_vlseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, size_t vl) {
- return vlseg2e64(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e64(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m(
@@ -56,7 +56,7 @@ void test_vlseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, size_t vl) {
- return vlseg2e64(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e64(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m(
@@ -69,7 +69,7 @@ void test_vlseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, size_t vl) {
- return vlseg2e64(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e64(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m(
@@ -82,7 +82,7 @@ void test_vlseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, size_t vl) {
- return vlseg2e64(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e64(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m(
@@ -95,7 +95,7 @@ void test_vlseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, size_t vl) {
- return vlseg2e64(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e64(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m(
@@ -108,7 +108,7 @@ void test_vlseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, size_t vl) {
- return vlseg2e64(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e64(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m(
@@ -121,6 +121,6 @@ void test_vlseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, size_t vl) {
- return vlseg2e64(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e64(v0, v1, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c
index 6fa5ea0c69d3..a861b31860ea 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m(
@@ -34,7 +34,7 @@ void test_vlseg2e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m(
@@ -49,7 +49,7 @@ void test_vlseg2e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m(
@@ -64,7 +64,7 @@ void test_vlseg2e64ff_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m(
@@ -79,7 +79,7 @@ void test_vlseg2e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m(
@@ -94,7 +94,7 @@ void test_vlseg2e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m(
@@ -109,7 +109,7 @@ void test_vlseg2e64ff_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m(
@@ -124,7 +124,7 @@ void test_vlseg2e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m(
@@ -139,6 +139,6 @@ void test_vlseg2e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e64ff(v0, v1, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8.c
index 88f909ecbcd2..d3f7dc92f787 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m(
@@ -29,7 +29,7 @@ void test_vlseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m(
@@ -42,7 +42,7 @@ void test_vlseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m(
@@ -55,7 +55,7 @@ void test_vlseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m(
@@ -68,7 +68,7 @@ void test_vlseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const i
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m(
@@ -81,7 +81,7 @@ void test_vlseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const i
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m(
@@ -94,7 +94,7 @@ void test_vlseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const i
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m(
@@ -107,7 +107,7 @@ void test_vlseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m(
@@ -120,7 +120,7 @@ void test_vlseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m(
@@ -133,7 +133,7 @@ void test_vlseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m(
@@ -146,7 +146,7 @@ void test_vlseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m(
@@ -159,6 +159,6 @@ void test_vlseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, size_t vl) {
- return vlseg2e8(v0, v1, mask, base, vl);
+ return __riscv_vlseg2e8(v0, v1, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c
index a69d1caafb8e..57c49da93db9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
@@ -34,7 +34,7 @@ void test_vlseg2e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m(
@@ -49,7 +49,7 @@ void test_vlseg2e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m(
@@ -64,7 +64,7 @@ void test_vlseg2e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m(
@@ -79,7 +79,7 @@ void test_vlseg2e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m(
@@ -94,7 +94,7 @@ void test_vlseg2e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m(
@@ -109,7 +109,7 @@ void test_vlseg2e8ff_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m(
@@ -124,7 +124,7 @@ void test_vlseg2e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m(
@@ -139,7 +139,7 @@ void test_vlseg2e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m(
@@ -154,7 +154,7 @@ void test_vlseg2e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m(
@@ -169,7 +169,7 @@ void test_vlseg2e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m(
@@ -184,6 +184,6 @@ void test_vlseg2e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
+ return __riscv_vlseg2e8ff(v0, v1, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16.c
index 4b29e529060a..79bec60ac2b7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m(
@@ -34,7 +34,7 @@ void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m(
@@ -49,7 +49,7 @@ void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m(
@@ -64,7 +64,7 @@ void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m(
@@ -79,7 +79,7 @@ void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m(
@@ -94,7 +94,7 @@ void test_vlseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m(
@@ -109,7 +109,7 @@ void test_vlseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m(
@@ -124,7 +124,7 @@ void test_vlseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m(
@@ -139,7 +139,7 @@ void test_vlseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m(
@@ -154,7 +154,7 @@ void test_vlseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m(
@@ -169,7 +169,7 @@ void test_vlseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m(
@@ -184,6 +184,6 @@ void test_vlseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, size_t vl) {
- return vlseg3e16(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e16(v0, v1, v2, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c
index ebc01dcebc58..f310ead87fb4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_m(
@@ -38,7 +38,7 @@ void test_vlseg3e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_m(
@@ -55,7 +55,7 @@ void test_vlseg3e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_m(
@@ -72,7 +72,7 @@ void test_vlseg3e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m(
@@ -89,7 +89,7 @@ void test_vlseg3e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m(
@@ -106,7 +106,7 @@ void test_vlseg3e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m(
@@ -123,7 +123,7 @@ void test_vlseg3e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m(
@@ -140,7 +140,7 @@ void test_vlseg3e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
@@ -157,7 +157,7 @@ void test_vlseg3e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
@@ -174,7 +174,7 @@ void test_vlseg3e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m(
@@ -191,7 +191,7 @@ void test_vlseg3e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m(
@@ -208,6 +208,6 @@ void test_vlseg3e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32.c
index 180fc02f0184..9b8527afa0a0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, size_t vl) {
- return vlseg3e32(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e32(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m(
@@ -34,7 +34,7 @@ void test_vlseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, size_t vl) {
- return vlseg3e32(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e32(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m(
@@ -49,7 +49,7 @@ void test_vlseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, size_t vl) {
- return vlseg3e32(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e32(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m(
@@ -64,7 +64,7 @@ void test_vlseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, size_t vl) {
- return vlseg3e32(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e32(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m(
@@ -79,7 +79,7 @@ void test_vlseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, size_t vl) {
- return vlseg3e32(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e32(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m(
@@ -94,7 +94,7 @@ void test_vlseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, size_t vl) {
- return vlseg3e32(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e32(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m(
@@ -109,7 +109,7 @@ void test_vlseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, size_t vl) {
- return vlseg3e32(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e32(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m(
@@ -124,7 +124,7 @@ void test_vlseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, size_t vl) {
- return vlseg3e32(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e32(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m(
@@ -139,6 +139,6 @@ void test_vlseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, size_t vl) {
- return vlseg3e32(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e32(v0, v1, v2, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c
index 7b586a3f77a1..03c9d29f16bf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m(
@@ -38,7 +38,7 @@ void test_vlseg3e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m(
@@ -55,7 +55,7 @@ void test_vlseg3e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m(
@@ -72,7 +72,7 @@ void test_vlseg3e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m(
@@ -89,7 +89,7 @@ void test_vlseg3e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m(
@@ -106,7 +106,7 @@ void test_vlseg3e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m(
@@ -123,7 +123,7 @@ void test_vlseg3e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m(
@@ -140,7 +140,7 @@ void test_vlseg3e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m(
@@ -157,6 +157,6 @@ void test_vlseg3e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64.c
index 8941e6f094f2..480b3c9cbf93 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, size_t vl) {
- return vlseg3e64(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e64(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m(
@@ -34,7 +34,7 @@ void test_vlseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, size_t vl) {
- return vlseg3e64(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e64(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vlseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, size_t vl) {
- return vlseg3e64(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e64(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m(
@@ -64,7 +64,7 @@ void test_vlseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, size_t vl) {
- return vlseg3e64(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e64(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m(
@@ -79,7 +79,7 @@ void test_vlseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, size_t vl) {
- return vlseg3e64(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e64(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m(
@@ -94,6 +94,6 @@ void test_vlseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, size_t vl) {
- return vlseg3e64(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e64(v0, v1, v2, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c
index d9da3463111b..8cce3f6f7176 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m(
@@ -38,7 +38,7 @@ void test_vlseg3e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m(
@@ -55,7 +55,7 @@ void test_vlseg3e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m(
@@ -72,7 +72,7 @@ void test_vlseg3e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m(
@@ -89,7 +89,7 @@ void test_vlseg3e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m(
@@ -106,6 +106,6 @@ void test_vlseg3e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8.c
index fb71b5c3e8cf..37726e63808c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8.c
@@ -18,7 +18,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m(
@@ -33,7 +33,7 @@ void test_vlseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m(
@@ -48,7 +48,7 @@ void test_vlseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m(
@@ -63,7 +63,7 @@ void test_vlseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m(
@@ -78,7 +78,7 @@ void test_vlseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m(
@@ -93,7 +93,7 @@ void test_vlseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m(
@@ -108,7 +108,7 @@ void test_vlseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m(
@@ -123,7 +123,7 @@ void test_vlseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m(
@@ -138,7 +138,7 @@ void test_vlseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m(
@@ -153,6 +153,6 @@ void test_vlseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vboo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, size_t vl) {
- return vlseg3e8(v0, v1, v2, mask, base, vl);
+ return __riscv_vlseg3e8(v0, v1, v2, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c
index b4b94b91fce7..770d0dd2c7f2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
@@ -38,7 +38,7 @@ void test_vlseg3e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m(
@@ -55,7 +55,7 @@ void test_vlseg3e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m(
@@ -72,7 +72,7 @@ void test_vlseg3e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m(
@@ -89,7 +89,7 @@ void test_vlseg3e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m(
@@ -106,7 +106,7 @@ void test_vlseg3e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m(
@@ -123,7 +123,7 @@ void test_vlseg3e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m(
@@ -140,7 +140,7 @@ void test_vlseg3e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m(
@@ -157,7 +157,7 @@ void test_vlseg3e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m(
@@ -174,6 +174,6 @@ void test_vlseg3e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
+ return __riscv_vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16.c
index 171a7fea85c9..afdb258bac50 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m(
@@ -38,7 +38,7 @@ void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_m(
@@ -55,7 +55,7 @@ void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m(
@@ -72,7 +72,7 @@ void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m(
@@ -89,7 +89,7 @@ void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m(
@@ -106,7 +106,7 @@ void test_vlseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m(
@@ -123,7 +123,7 @@ void test_vlseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m(
@@ -140,7 +140,7 @@ void test_vlseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m(
@@ -157,7 +157,7 @@ void test_vlseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m(
@@ -174,7 +174,7 @@ void test_vlseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m(
@@ -191,7 +191,7 @@ void test_vlseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m(
@@ -208,6 +208,6 @@ void test_vlseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, size_t vl) {
- return vlseg4e16(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e16(v0, v1, v2, v3, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c
index c64330edd2fc..d902c128a55c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_m(
@@ -42,7 +42,7 @@ void test_vlseg4e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_m(
@@ -61,7 +61,7 @@ void test_vlseg4e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_m(
@@ -80,7 +80,7 @@ void test_vlseg4e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m(
@@ -99,7 +99,7 @@ void test_vlseg4e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m(
@@ -118,7 +118,7 @@ void test_vlseg4e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m(
@@ -137,7 +137,7 @@ void test_vlseg4e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m(
@@ -156,7 +156,7 @@ void test_vlseg4e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m(
@@ -175,7 +175,7 @@ void test_vlseg4e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m(
@@ -194,7 +194,7 @@ void test_vlseg4e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m(
@@ -213,7 +213,7 @@ void test_vlseg4e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m(
@@ -232,6 +232,6 @@ void test_vlseg4e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32.c
index 7a083f7708fe..1ccfa14e8724 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, size_t vl) {
- return vlseg4e32(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e32(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m(
@@ -38,7 +38,7 @@ void test_vlseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, size_t vl) {
- return vlseg4e32(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e32(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m(
@@ -55,7 +55,7 @@ void test_vlseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, size_t vl) {
- return vlseg4e32(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e32(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m(
@@ -72,7 +72,7 @@ void test_vlseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, size_t vl) {
- return vlseg4e32(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e32(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m(
@@ -89,7 +89,7 @@ void test_vlseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, size_t vl) {
- return vlseg4e32(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e32(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m(
@@ -106,7 +106,7 @@ void test_vlseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, size_t vl) {
- return vlseg4e32(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e32(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m(
@@ -123,7 +123,7 @@ void test_vlseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, size_t vl) {
- return vlseg4e32(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e32(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m(
@@ -140,7 +140,7 @@ void test_vlseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, size_t vl) {
- return vlseg4e32(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e32(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m(
@@ -157,6 +157,6 @@ void test_vlseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, size_t vl) {
- return vlseg4e32(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e32(v0, v1, v2, v3, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c
index d82880955da4..c5b868f67019 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m(
@@ -42,7 +42,7 @@ void test_vlseg4e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m(
@@ -61,7 +61,7 @@ void test_vlseg4e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m(
@@ -80,7 +80,7 @@ void test_vlseg4e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m(
@@ -99,7 +99,7 @@ void test_vlseg4e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m(
@@ -118,7 +118,7 @@ void test_vlseg4e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m(
@@ -137,7 +137,7 @@ void test_vlseg4e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m(
@@ -156,7 +156,7 @@ void test_vlseg4e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m(
@@ -175,6 +175,6 @@ void test_vlseg4e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64.c
index 9166595cb745..f6f409b8fa65 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, size_t vl) {
- return vlseg4e64(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e64(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m(
@@ -38,7 +38,7 @@ void test_vlseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, size_t vl) {
- return vlseg4e64(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e64(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m(
@@ -55,7 +55,7 @@ void test_vlseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, size_t vl) {
- return vlseg4e64(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e64(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m(
@@ -72,7 +72,7 @@ void test_vlseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, size_t vl) {
- return vlseg4e64(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e64(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m(
@@ -89,7 +89,7 @@ void test_vlseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, size_t vl) {
- return vlseg4e64(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e64(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m(
@@ -106,6 +106,6 @@ void test_vlseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, size_t vl) {
- return vlseg4e64(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e64(v0, v1, v2, v3, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c
index a0d6d952a0ba..94cdfbab240c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m(
@@ -42,7 +42,7 @@ void test_vlseg4e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m(
@@ -61,7 +61,7 @@ void test_vlseg4e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m(
@@ -80,7 +80,7 @@ void test_vlseg4e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m(
@@ -99,7 +99,7 @@ void test_vlseg4e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m(
@@ -118,6 +118,6 @@ void test_vlseg4e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8.c
index dbc3b40b4c93..54f55f5ed55d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8.c
@@ -20,7 +20,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m(
@@ -37,7 +37,7 @@ void test_vlseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m(
@@ -54,7 +54,7 @@ void test_vlseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m(
@@ -71,7 +71,7 @@ void test_vlseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m(
@@ -88,7 +88,7 @@ void test_vlseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m(
@@ -105,7 +105,7 @@ void test_vlseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m(
@@ -122,7 +122,7 @@ void test_vlseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m(
@@ -139,7 +139,7 @@ void test_vlseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m(
@@ -156,7 +156,7 @@ void test_vlseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m(
@@ -173,6 +173,6 @@ void test_vlseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, size_t vl) {
- return vlseg4e8(v0, v1, v2, v3, mask, base, vl);
+ return __riscv_vlseg4e8(v0, v1, v2, v3, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c
index 76e53c1a8208..ba0c1f5f4a5c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
@@ -42,7 +42,7 @@ void test_vlseg4e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m(
@@ -61,7 +61,7 @@ void test_vlseg4e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m(
@@ -80,7 +80,7 @@ void test_vlseg4e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m(
@@ -99,7 +99,7 @@ void test_vlseg4e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m(
@@ -118,7 +118,7 @@ void test_vlseg4e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m(
@@ -137,7 +137,7 @@ void test_vlseg4e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m(
@@ -156,7 +156,7 @@ void test_vlseg4e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m(
@@ -175,7 +175,7 @@ void test_vlseg4e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m(
@@ -194,6 +194,6 @@ void test_vlseg4e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
+ return __riscv_vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16.c
index ce2bd8b49cc4..1909aa93467c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, size_t vl) {
- return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m(
@@ -42,7 +42,7 @@ void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, size_t vl) {
- return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m(
@@ -61,7 +61,7 @@ void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, size_t vl) {
- return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m(
@@ -80,7 +80,7 @@ void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, size_t vl) {
- return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m(
@@ -99,7 +99,7 @@ void test_vlseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, size_t vl) {
- return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m(
@@ -118,7 +118,7 @@ void test_vlseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, size_t vl) {
- return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m(
@@ -137,7 +137,7 @@ void test_vlseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, size_t vl) {
- return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m(
@@ -156,7 +156,7 @@ void test_vlseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, size_t vl) {
- return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m(
@@ -175,6 +175,6 @@ void test_vlseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, size_t vl) {
- return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c
index 0968ed7fc137..a7390588a3e9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_m(
@@ -46,7 +46,7 @@ void test_vlseg5e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_m(
@@ -67,7 +67,7 @@ void test_vlseg5e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m(
@@ -88,7 +88,7 @@ void test_vlseg5e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m(
@@ -109,7 +109,7 @@ void test_vlseg5e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m(
@@ -130,7 +130,7 @@ void test_vlseg5e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m(
@@ -151,7 +151,7 @@ void test_vlseg5e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m(
@@ -172,7 +172,7 @@ void test_vlseg5e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m(
@@ -193,6 +193,6 @@ void test_vlseg5e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32.c
index fd18295b8573..d3f165789e6a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, size_t vl) {
- return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m(
@@ -42,7 +42,7 @@ void test_vlseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, size_t vl) {
- return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m(
@@ -61,7 +61,7 @@ void test_vlseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, size_t vl) {
- return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m(
@@ -80,7 +80,7 @@ void test_vlseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, size_t vl) {
- return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m(
@@ -99,7 +99,7 @@ void test_vlseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, size_t vl) {
- return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m(
@@ -118,6 +118,6 @@ void test_vlseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, size_t vl) {
- return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c
index 28b350d8c22c..866b6ce4d27c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m(
@@ -46,7 +46,7 @@ void test_vlseg5e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m(
@@ -67,7 +67,7 @@ void test_vlseg5e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m(
@@ -88,7 +88,7 @@ void test_vlseg5e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m(
@@ -109,7 +109,7 @@ void test_vlseg5e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m(
@@ -130,6 +130,6 @@ void test_vlseg5e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64.c
index 1f4166aac41e..b63e3a0a1f9a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, size_t vl) {
- return vlseg5e64(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e64(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m(
@@ -42,7 +42,7 @@ void test_vlseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, size_t vl) {
- return vlseg5e64(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e64(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m(
@@ -61,6 +61,6 @@ void test_vlseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, size_t vl) {
- return vlseg5e64(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e64(v0, v1, v2, v3, v4, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c
index 8f3a08963f4d..f9630093c60e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e64ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m(
@@ -46,7 +46,7 @@ void test_vlseg5e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e64ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m(
@@ -67,6 +67,6 @@ void test_vlseg5e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e64ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8.c
index f4b2e527f469..fbbbaad756ca 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8.c
@@ -22,7 +22,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, size_t vl) {
- return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m(
@@ -41,7 +41,7 @@ void test_vlseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, size_t vl) {
- return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m(
@@ -60,7 +60,7 @@ void test_vlseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, size_t vl) {
- return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m(
@@ -79,7 +79,7 @@ void test_vlseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, size_t vl) {
- return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m(
@@ -98,7 +98,7 @@ void test_vlseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, size_t vl) {
- return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m(
@@ -117,7 +117,7 @@ void test_vlseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, size_t vl) {
- return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m(
@@ -136,7 +136,7 @@ void test_vlseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, size_t vl) {
- return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m(
@@ -155,6 +155,6 @@ void test_vlseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, size_t vl) {
- return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
+ return __riscv_vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c
index 45022dc41fa3..ce1014bf7150 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
@@ -46,7 +46,7 @@ void test_vlseg5e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m(
@@ -67,7 +67,7 @@ void test_vlseg5e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m(
@@ -88,7 +88,7 @@ void test_vlseg5e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m(
@@ -109,7 +109,7 @@ void test_vlseg5e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m(
@@ -130,7 +130,7 @@ void test_vlseg5e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m(
@@ -151,7 +151,7 @@ void test_vlseg5e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m(
@@ -172,6 +172,6 @@ void test_vlseg5e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
+ return __riscv_vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16.c
index 7d536e0df000..1b60fbfa3f95 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, size_t vl) {
- return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m(
@@ -46,7 +46,7 @@ void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, size_t vl) {
- return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m(
@@ -67,7 +67,7 @@ void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, size_t vl) {
- return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m(
@@ -88,7 +88,7 @@ void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, size_t vl) {
- return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m(
@@ -109,7 +109,7 @@ void test_vlseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, size_t vl) {
- return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m(
@@ -130,7 +130,7 @@ void test_vlseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, size_t vl) {
- return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m(
@@ -151,7 +151,7 @@ void test_vlseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, size_t vl) {
- return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m(
@@ -172,7 +172,7 @@ void test_vlseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, size_t vl) {
- return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m(
@@ -193,6 +193,6 @@ void test_vlseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, size_t vl) {
- return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c
index a74be8995f76..33b0d207c751 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_m(
@@ -50,7 +50,7 @@ void test_vlseg6e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_m(
@@ -73,7 +73,7 @@ void test_vlseg6e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m(
@@ -96,7 +96,7 @@ void test_vlseg6e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m(
@@ -119,7 +119,7 @@ void test_vlseg6e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m(
@@ -142,7 +142,7 @@ void test_vlseg6e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m(
@@ -165,7 +165,7 @@ void test_vlseg6e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m(
@@ -188,7 +188,7 @@ void test_vlseg6e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m(
@@ -211,6 +211,6 @@ void test_vlseg6e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32.c
index 368a77de7004..d79851f27fe7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, size_t vl) {
- return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m(
@@ -46,7 +46,7 @@ void test_vlseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, size_t vl) {
- return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m(
@@ -67,7 +67,7 @@ void test_vlseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, size_t vl) {
- return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m(
@@ -88,7 +88,7 @@ void test_vlseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, size_t vl) {
- return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m(
@@ -109,7 +109,7 @@ void test_vlseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, size_t vl) {
- return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m(
@@ -130,6 +130,6 @@ void test_vlseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, size_t vl) {
- return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
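
For reference, a minimal caller-side sketch of the renamed overload (an illustration, not part of the patch; the helper name and parameters are made up, and it assumes a toolchain whose <riscv_vector.h> matches this revision of the naming scheme). The call shape mirrors the tests above:

    #include <riscv_vector.h>

    // Load up to n six-field float records from src, de-interleaving
    // field i into *fi; inactive lanes are governed by the mask.
    void deinterleave6(vfloat32m1_t *f0, vfloat32m1_t *f1, vfloat32m1_t *f2,
                       vfloat32m1_t *f3, vfloat32m1_t *f4, vfloat32m1_t *f5,
                       vbool32_t mask, const float *src, size_t n) {
      size_t vl = __riscv_vsetvl_e32m1(n);       // elements per strip
      __riscv_vlseg6e32(f0, f1, f2, f3, f4, f5,  // one output per field
                        mask, src, vl);          // masked segment load
    }
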
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c
index 8f921b1e8c02..26c88b3f67d4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m(
@@ -50,7 +50,7 @@ void test_vlseg6e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m(
@@ -73,7 +73,7 @@ void test_vlseg6e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m(
@@ -96,7 +96,7 @@ void test_vlseg6e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m(
@@ -119,7 +119,7 @@ void test_vlseg6e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m(
@@ -142,6 +142,6 @@ void test_vlseg6e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
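
The ff ("fault-only-first") flavor differs only in the extra new_vl out-parameter. A hedged sketch of how a caller observes an early stop (helper name is illustrative, not from the patch):

    #include <riscv_vector.h>

    // Fault-only-first: if element 0 loads but a later element would trap,
    // the load stops early and reports how many elements were read.
    size_t load6ff(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
                   vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5,
                   vbool32_t mask, const int32_t *base, size_t avl) {
      size_t vl = __riscv_vsetvl_e32m1(avl);
      size_t new_vl;
      __riscv_vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, &new_vl, vl);
      return new_vl;  // <= vl; smaller when the load was cut short
    }
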
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64.c
index 51bb15ced815..f95593537c57 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, size_t vl) {
- return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e64(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m(
@@ -46,7 +46,7 @@ void test_vlseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, size_t vl) {
- return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e64(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m(
@@ -67,6 +67,6 @@ void test_vlseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, size_t vl) {
- return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e64(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c
index 5ae07a2e1b49..5699c40dcc22 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m(
@@ -50,7 +50,7 @@ void test_vlseg6e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m(
@@ -73,6 +73,6 @@ void test_vlseg6e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8.c
index aeba7d881cb7..64c6c8aa6ab8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8.c
@@ -24,7 +24,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, size_t vl) {
- return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m(
@@ -45,7 +45,7 @@ void test_vlseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, size_t vl) {
- return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m(
@@ -66,7 +66,7 @@ void test_vlseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, size_t vl) {
- return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m(
@@ -87,7 +87,7 @@ void test_vlseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, size_t vl) {
- return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m(
@@ -108,7 +108,7 @@ void test_vlseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, size_t vl) {
- return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vlseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, size_t vl) {
- return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m(
@@ -150,7 +150,7 @@ void test_vlseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, size_t vl) {
- return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m(
@@ -171,6 +171,6 @@ void test_vlseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, size_t vl) {
- return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
+ return __riscv_vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c
index 1bf7b3278c99..4023e3880db0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m(
@@ -50,7 +50,7 @@ void test_vlseg6e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m(
@@ -73,7 +73,7 @@ void test_vlseg6e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m(
@@ -96,7 +96,7 @@ void test_vlseg6e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m(
@@ -119,7 +119,7 @@ void test_vlseg6e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m(
@@ -142,7 +142,7 @@ void test_vlseg6e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m(
@@ -165,7 +165,7 @@ void test_vlseg6e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m(
@@ -188,6 +188,6 @@ void test_vlseg6e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
+ return __riscv_vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16.c
index e52ebbd63044..c9573333f65e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, size_t vl) {
- return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m(
@@ -50,7 +50,7 @@ void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, size_t vl) {
- return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m(
@@ -73,7 +73,7 @@ void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, size_t vl) {
- return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m(
@@ -96,7 +96,7 @@ void test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, size_t vl) {
- return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m(
@@ -119,7 +119,7 @@ void test_vlseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, size_t vl) {
- return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m(
@@ -142,7 +142,7 @@ void test_vlseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, size_t vl) {
- return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m(
@@ -165,7 +165,7 @@ void test_vlseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, size_t vl) {
- return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m(
@@ -188,7 +188,7 @@ void test_vlseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, size_t vl) {
- return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m(
@@ -211,6 +211,6 @@ void test_vlseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, size_t vl) {
- return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
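
A worked layout for the seven-field case (an illustration, not from the patch; deinterleave7 is a made-up helper): with vl = 2, memory at base is read as two 7-field records, a0 b0 c0 d0 e0 f0 g0 | a1 b1 c1 d1 e1 f1 g1, and the fields are de-interleaved so that *v0 = {a0,a1}, *v1 = {b0,b1}, ..., *v6 = {g0,g1}; only active (mask=1) lanes are guaranteed to receive loaded elements.

    #include <riscv_vector.h>

    // De-interleave two 7-field int16 records starting at base.
    void deinterleave7(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
                       vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5,
                       vint16m1_t *v6, vbool16_t mask,
                       const int16_t *base) {
      size_t vl = __riscv_vsetvl_e16m1(2);  // ask for two segments
      __riscv_vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
    }

The _Float16 variants in this file additionally presume half-precision vector support (the zvfh extension) among the target features.
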
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c
index fe0ec8f2300e..a209eaf1a8b3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_m(
@@ -54,7 +54,7 @@ void test_vlseg7e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_m(
@@ -79,7 +79,7 @@ void test_vlseg7e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m(
@@ -104,7 +104,7 @@ void test_vlseg7e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m(
@@ -129,7 +129,7 @@ void test_vlseg7e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m(
@@ -154,7 +154,7 @@ void test_vlseg7e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m(
@@ -179,7 +179,7 @@ void test_vlseg7e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m(
@@ -204,7 +204,7 @@ void test_vlseg7e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m(
@@ -229,6 +229,6 @@ void test_vlseg7e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32.c
index d1516f87a1e9..3b44b02fcce8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, size_t vl) {
- return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m(
@@ -50,7 +50,7 @@ void test_vlseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, size_t vl) {
- return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m(
@@ -73,7 +73,7 @@ void test_vlseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, size_t vl) {
- return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m(
@@ -96,7 +96,7 @@ void test_vlseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, size_t vl) {
- return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m(
@@ -119,7 +119,7 @@ void test_vlseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, size_t vl) {
- return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m(
@@ -142,6 +142,6 @@ void test_vlseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, size_t vl) {
- return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c
index d9716505e154..62e009829e5c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m(
@@ -54,7 +54,7 @@ void test_vlseg7e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m(
@@ -79,7 +79,7 @@ void test_vlseg7e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m(
@@ -104,7 +104,7 @@ void test_vlseg7e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m(
@@ -129,7 +129,7 @@ void test_vlseg7e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m(
@@ -154,6 +154,6 @@ void test_vlseg7e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64.c
index 9c04c7194d22..a94920fea26d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, size_t vl) {
- return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m(
@@ -50,7 +50,7 @@ void test_vlseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, size_t vl) {
- return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m(
@@ -73,6 +73,6 @@ void test_vlseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, size_t vl) {
- return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c
index 1bff34f70990..a645117b9972 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m(
@@ -54,7 +54,7 @@ void test_vlseg7e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m(
@@ -79,6 +79,6 @@ void test_vlseg7e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8.c
index 4eca5018959e..4aaad1393376 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8.c
@@ -26,7 +26,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, size_t vl) {
- return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m(
@@ -49,7 +49,7 @@ void test_vlseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, size_t vl) {
- return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m(
@@ -72,7 +72,7 @@ void test_vlseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, size_t vl) {
- return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m(
@@ -95,7 +95,7 @@ void test_vlseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, size_t vl) {
- return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m(
@@ -118,7 +118,7 @@ void test_vlseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, size_t vl) {
- return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m(
@@ -141,7 +141,7 @@ void test_vlseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, size_t vl) {
- return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m(
@@ -164,7 +164,7 @@ void test_vlseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, size_t vl) {
- return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m(
@@ -187,6 +187,6 @@ void test_vlseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, size_t vl) {
- return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
+ return __riscv_vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c
index 7bd17f583dbf..c218fb25a0f4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m(
@@ -54,7 +54,7 @@ void test_vlseg7e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m(
@@ -79,7 +79,7 @@ void test_vlseg7e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m(
@@ -104,7 +104,7 @@ void test_vlseg7e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m(
@@ -129,7 +129,7 @@ void test_vlseg7e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m(
@@ -154,7 +154,7 @@ void test_vlseg7e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m(
@@ -179,7 +179,7 @@ void test_vlseg7e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m(
@@ -204,6 +204,6 @@ void test_vlseg7e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
+ return __riscv_vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl);
}
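
Projects written against the older unprefixed spellings could bridge to the new names with thin macros while migrating. These shims are hypothetical and not part of this patch; they simply forward every argument to the prefixed overload:

    // Hypothetical compatibility shims (not part of this patch): keep old
    // unprefixed call sites building against headers that only provide the
    // __riscv_-prefixed overloads. Remove once callers are migrated.
    #define vlseg7e8(...)   __riscv_vlseg7e8(__VA_ARGS__)
    #define vlseg7e8ff(...) __riscv_vlseg7e8ff(__VA_ARGS__)
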
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16.c
index 9be8600467dd..f9e327d91731 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, size_t vl) {
- return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m(
@@ -54,7 +54,7 @@ void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, size_t vl) {
- return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_m(
@@ -79,7 +79,7 @@ void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, size_t vl) {
- return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m(
@@ -104,7 +104,7 @@ void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, size_t vl) {
- return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m(
@@ -129,7 +129,7 @@ void test_vlseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, size_t vl) {
- return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m(
@@ -154,7 +154,7 @@ void test_vlseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, size_t vl) {
- return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m(
@@ -179,7 +179,7 @@ void test_vlseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, size_t vl) {
- return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m(
@@ -204,7 +204,7 @@ void test_vlseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, size_t vl) {
- return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m(
@@ -229,6 +229,6 @@ void test_vlseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, size_t vl) {
- return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c
index d8e76e61dcaa..fb37c770e9a8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c
@@ -31,7 +31,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_m(
@@ -58,7 +58,7 @@ void test_vlseg8e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_m(
@@ -85,7 +85,7 @@ void test_vlseg8e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m(
@@ -112,7 +112,7 @@ void test_vlseg8e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m(
@@ -139,7 +139,7 @@ void test_vlseg8e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m(
@@ -166,7 +166,7 @@ void test_vlseg8e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m(
@@ -193,7 +193,7 @@ void test_vlseg8e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m(
@@ -220,7 +220,7 @@ void test_vlseg8e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m(
@@ -247,6 +247,6 @@ void test_vlseg8e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32.c
index a4d1a9d9fab8..e4f0ed452346 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, size_t vl) {
- return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m(
@@ -54,7 +54,7 @@ void test_vlseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, size_t vl) {
- return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m(
@@ -79,7 +79,7 @@ void test_vlseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, size_t vl) {
- return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m(
@@ -104,7 +104,7 @@ void test_vlseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, size_t vl) {
- return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m(
@@ -129,7 +129,7 @@ void test_vlseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, size_t vl) {
- return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m(
@@ -154,6 +154,6 @@ void test_vlseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, size_t vl) {
- return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
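
Illustrative sketch, not part of the patch: how a caller spells the renamed overloaded intrinsic directly. vlseg8e32 deinterleaves eight 32-bit fields from consecutive segments at base into eight vector registers, and the overload is resolved from the pointee and mask types, exactly as in the autogenerated tests above. A toolchain that already accepts the __riscv_ prefix (i.e. built with this patch series) is assumed.

#include <riscv_vector.h>
#include <stdint.h>

static void load_i32_fields(vint32m1_t *f0, vint32m1_t *f1, vint32m1_t *f2,
                            vint32m1_t *f3, vint32m1_t *f4, vint32m1_t *f5,
                            vint32m1_t *f6, vint32m1_t *f7, vbool32_t mask,
                            const int32_t *base, size_t vl) {
  /* Before this patch the same call was spelled vlseg8e32(...);
     only the prefix changes, not the argument list or overload rules. */
  __riscv_vlseg8e32(f0, f1, f2, f3, f4, f5, f6, f7, mask, base, vl);
}
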
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c
index 05a85e13d79d..7045f8394cf2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c
@@ -31,7 +31,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m(
@@ -58,7 +58,7 @@ void test_vlseg8e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m(
@@ -85,7 +85,7 @@ void test_vlseg8e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m(
@@ -112,7 +112,7 @@ void test_vlseg8e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m(
@@ -139,7 +139,7 @@ void test_vlseg8e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m(
@@ -166,6 +166,6 @@ void test_vlseg8e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
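
Illustrative sketch, not part of the patch: the ff (fault-only-first) form exercised by the tests above. If a segment after the first would fault, the load is truncated instead of trapping, and the number of segments actually loaded is written through the extra size_t* parameter (new_vl in the tests). The same prefixed toolchain as above is assumed.

#include <riscv_vector.h>
#include <stdint.h>

static size_t load_i32x8_until_fault(vint32m1_t *f0, vint32m1_t *f1,
                                     vint32m1_t *f2, vint32m1_t *f3,
                                     vint32m1_t *f4, vint32m1_t *f5,
                                     vint32m1_t *f6, vint32m1_t *f7,
                                     vbool32_t mask, const int32_t *base,
                                     size_t vl) {
  size_t loaded;
  __riscv_vlseg8e32ff(f0, f1, f2, f3, f4, f5, f6, f7, mask, base,
                      &loaded, vl);
  return loaded; /* <= vl; shrinks only when an access past element 0 would trap */
}
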
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64.c
index 8262243d29de..44572223350c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, size_t vl) {
- return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m(
@@ -54,7 +54,7 @@ void test_vlseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, size_t vl) {
- return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m(
@@ -79,6 +79,6 @@ void test_vlseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, size_t vl) {
- return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c
index 9b903fc3246a..986796b3f35a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c
@@ -31,7 +31,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m(
@@ -58,7 +58,7 @@ void test_vlseg8e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m(
@@ -85,6 +85,6 @@ void test_vlseg8e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8.c
index 31596e60845e..424b727b36ac 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8.c
@@ -28,7 +28,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, size_t vl) {
- return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m(
@@ -53,7 +53,7 @@ void test_vlseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, size_t vl) {
- return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m(
@@ -78,7 +78,7 @@ void test_vlseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, size_t vl) {
- return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m(
@@ -103,7 +103,7 @@ void test_vlseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, size_t vl) {
- return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m(
@@ -128,7 +128,7 @@ void test_vlseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, size_t vl) {
- return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m(
@@ -153,7 +153,7 @@ void test_vlseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, size_t vl) {
- return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m(
@@ -178,7 +178,7 @@ void test_vlseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, size_t vl) {
- return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m(
@@ -203,6 +203,6 @@ void test_vlseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, size_t vl) {
- return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
+ return __riscv_vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c
index 987e3b9e7e90..3405c9ef6590 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c
@@ -31,7 +31,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m(
@@ -58,7 +58,7 @@ void test_vlseg8e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m(
@@ -85,7 +85,7 @@ void test_vlseg8e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m(
@@ -112,7 +112,7 @@ void test_vlseg8e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m(
@@ -139,7 +139,7 @@ void test_vlseg8e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m(
@@ -166,7 +166,7 @@ void test_vlseg8e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m(
@@ -193,7 +193,7 @@ void test_vlseg8e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m(
@@ -220,6 +220,6 @@ void test_vlseg8e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
+ return __riscv_vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c
index 02eb785cd8d0..11d405342c36 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_m(
@@ -30,7 +30,7 @@ void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_m(
@@ -43,7 +43,7 @@ void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_m(
@@ -56,7 +56,7 @@ void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_m(
@@ -69,7 +69,7 @@ void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_m(
@@ -82,7 +82,7 @@ void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_m(
@@ -95,7 +95,7 @@ void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_m(
@@ -108,7 +108,7 @@ void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_m(
@@ -121,7 +121,7 @@ void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_m(
@@ -134,7 +134,7 @@ void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_m(
@@ -147,7 +147,7 @@ void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_m(
@@ -160,7 +160,7 @@ void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_m(
@@ -173,7 +173,7 @@ void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_m(
@@ -186,7 +186,7 @@ void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_m(
@@ -199,6 +199,6 @@ void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c
index 0c19c24c97bd..965a34a5d9be 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_m(
@@ -30,7 +30,7 @@ void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_m(
@@ -43,7 +43,7 @@ void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_m(
@@ -56,7 +56,7 @@ void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_m(
@@ -69,7 +69,7 @@ void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_m(
@@ -82,7 +82,7 @@ void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_m(
@@ -95,7 +95,7 @@ void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_m(
@@ -108,7 +108,7 @@ void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_m(
@@ -121,7 +121,7 @@ void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_m(
@@ -134,7 +134,7 @@ void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_m(
@@ -147,7 +147,7 @@ void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_m(
@@ -160,6 +160,6 @@ void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
}
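
Illustrative sketch, not part of the patch: the strided segment form. bstride is the byte distance between the starts of consecutive two-field segments, so an array of small structs can be deinterleaved into one vector per field in a single call. The struct layout below is an assumption for illustration; the intrinsic signature matches the tests above.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { int32_t x, y; int32_t pad[2]; } point_t; /* hypothetical record */

static void load_points(vint32m1_t *xs, vint32m1_t *ys, vbool32_t mask,
                        const point_t *pts, size_t vl) {
  /* Gathers pts[i].x into *xs and pts[i].y into *ys for i < vl,
     stepping sizeof(point_t) bytes between segments. */
  __riscv_vlsseg2e32(xs, ys, mask, &pts->x, (ptrdiff_t)sizeof(point_t), vl);
}
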
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c
index 011254ec333b..9f1b3e2ca04d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_m(
@@ -30,7 +30,7 @@ void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_m(
@@ -43,7 +43,7 @@ void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_m(
@@ -56,7 +56,7 @@ void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_m(
@@ -69,7 +69,7 @@ void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_m(
@@ -82,7 +82,7 @@ void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_m(
@@ -95,7 +95,7 @@ void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_m(
@@ -108,7 +108,7 @@ void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_m(
@@ -121,6 +121,6 @@ void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c
index 0134b874f512..e9756f41d373 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_m(
@@ -29,7 +29,7 @@ void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_m(
@@ -42,7 +42,7 @@ void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_m(
@@ -55,7 +55,7 @@ void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_m(
@@ -68,7 +68,7 @@ void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_m(
@@ -81,7 +81,7 @@ void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_m(
@@ -94,7 +94,7 @@ void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_m(
@@ -107,7 +107,7 @@ void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_m(
@@ -120,7 +120,7 @@ void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_m(
@@ -133,7 +133,7 @@ void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_m(
@@ -146,7 +146,7 @@ void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, cons
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_m(
@@ -159,6 +159,6 @@ void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, cons
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8(v0, v1, mask, base, bstride, vl);
+ return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c
index 38f9ed1ae422..3d01c9e184c8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_m(
@@ -34,7 +34,7 @@ void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_m(
@@ -49,7 +49,7 @@ void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_m(
@@ -64,7 +64,7 @@ void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_m(
@@ -79,7 +79,7 @@ void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_m(
@@ -94,7 +94,7 @@ void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_m(
@@ -109,7 +109,7 @@ void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_m(
@@ -124,7 +124,7 @@ void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_m(
@@ -139,7 +139,7 @@ void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_m(
@@ -154,7 +154,7 @@ void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_m(
@@ -169,7 +169,7 @@ void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_m(
@@ -184,6 +184,6 @@ void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c
index 073639a074fc..b920768b23f1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_m(
@@ -34,7 +34,7 @@ void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_m(
@@ -49,7 +49,7 @@ void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_m(
@@ -64,7 +64,7 @@ void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_m(
@@ -79,7 +79,7 @@ void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_m(
@@ -94,7 +94,7 @@ void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_m(
@@ -109,7 +109,7 @@ void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_m(
@@ -124,7 +124,7 @@ void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_m(
@@ -139,6 +139,6 @@ void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c
index 325738e5f870..1fdcf0f66e3f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_m(
@@ -34,7 +34,7 @@ void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_m(
@@ -64,7 +64,7 @@ void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_m(
@@ -79,7 +79,7 @@ void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_m(
@@ -94,6 +94,6 @@ void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c
index 2209bbbe0752..0b927a1ab870 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c
@@ -18,7 +18,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_m(
@@ -33,7 +33,7 @@ void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_m(
@@ -48,7 +48,7 @@ void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_m(
@@ -63,7 +63,7 @@ void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_m(
@@ -78,7 +78,7 @@ void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_m(
@@ -93,7 +93,7 @@ void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_m(
@@ -108,7 +108,7 @@ void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_m(
@@ -123,7 +123,7 @@ void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_m(
@@ -138,7 +138,7 @@ void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_m(
@@ -153,6 +153,6 @@ void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+ return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c
index c1f68ff1e87a..a4fd339e742b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_m(
@@ -38,7 +38,7 @@ void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_m(
@@ -55,7 +55,7 @@ void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_m(
@@ -72,7 +72,7 @@ void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_m(
@@ -89,7 +89,7 @@ void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_m(
@@ -106,7 +106,7 @@ void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_m(
@@ -123,7 +123,7 @@ void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_m(
@@ -140,7 +140,7 @@ void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_m(
@@ -157,7 +157,7 @@ void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_m(
@@ -174,7 +174,7 @@ void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_m(
@@ -191,7 +191,7 @@ void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_m(
@@ -208,6 +208,6 @@ void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c
index 92b5350e8b35..9eb205abd42a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_m(
@@ -38,7 +38,7 @@ void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_m(
@@ -55,7 +55,7 @@ void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_m(
@@ -72,7 +72,7 @@ void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_m(
@@ -89,7 +89,7 @@ void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_m(
@@ -106,7 +106,7 @@ void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_m(
@@ -123,7 +123,7 @@ void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_m(
@@ -140,7 +140,7 @@ void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_m(
@@ -157,6 +157,6 @@ void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c
index 210360a8873f..c98c4d5f675f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_m(
@@ -38,7 +38,7 @@ void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_m(
@@ -55,7 +55,7 @@ void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_m(
@@ -72,7 +72,7 @@ void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_m(
@@ -89,7 +89,7 @@ void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_m(
@@ -106,6 +106,6 @@ void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c
index f99e9ec4725e..7b5a993f68e2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c
@@ -20,7 +20,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_m(
@@ -37,7 +37,7 @@ void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_m(
@@ -54,7 +54,7 @@ void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_m(
@@ -71,7 +71,7 @@ void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_m(
@@ -88,7 +88,7 @@ void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_m(
@@ -105,7 +105,7 @@ void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_m(
@@ -122,7 +122,7 @@ void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_m(
@@ -139,7 +139,7 @@ void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_m(
@@ -156,7 +156,7 @@ void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_m(
@@ -173,6 +173,6 @@ void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+ return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c
index 378e4ca52338..64017d630393 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_m(
@@ -42,7 +42,7 @@ void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_m(
@@ -61,7 +61,7 @@ void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_m(
@@ -80,7 +80,7 @@ void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_m(
@@ -99,7 +99,7 @@ void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_m(
@@ -118,7 +118,7 @@ void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_m(
@@ -137,7 +137,7 @@ void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_m(
@@ -156,7 +156,7 @@ void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_m(
@@ -175,6 +175,6 @@ void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c
index 75c899d8adc0..d0dd1d19a0cb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_m(
@@ -42,7 +42,7 @@ void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_m(
@@ -61,7 +61,7 @@ void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_m(
@@ -80,7 +80,7 @@ void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_m(
@@ -99,7 +99,7 @@ void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_m(
@@ -118,6 +118,6 @@ void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c
index 24110c148f11..2c438645ce39 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_m(
@@ -42,7 +42,7 @@ void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_m(
@@ -61,6 +61,6 @@ void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c
index 4093d51d05e1..db1e76e11bb8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c
@@ -22,7 +22,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_m(
@@ -41,7 +41,7 @@ void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_m(
@@ -60,7 +60,7 @@ void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_m(
@@ -79,7 +79,7 @@ void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_m(
@@ -98,7 +98,7 @@ void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_m(
@@ -117,7 +117,7 @@ void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_m(
@@ -136,7 +136,7 @@ void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_m(
@@ -155,6 +155,6 @@ void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+ return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c
index 2fcc14d0203b..31f3762298b7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_m(
@@ -46,7 +46,7 @@ void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_m(
@@ -67,7 +67,7 @@ void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_m(
@@ -88,7 +88,7 @@ void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_m(
@@ -109,7 +109,7 @@ void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_m(
@@ -130,7 +130,7 @@ void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_m(
@@ -151,7 +151,7 @@ void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_m(
@@ -172,7 +172,7 @@ void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_m(
@@ -193,6 +193,6 @@ void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c
index c41dd0f8714c..f4506ef4c993 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_m(
@@ -46,7 +46,7 @@ void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_m(
@@ -67,7 +67,7 @@ void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_m(
@@ -88,7 +88,7 @@ void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_m(
@@ -109,7 +109,7 @@ void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_m(
@@ -130,6 +130,6 @@ void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c
index 06f3dac17869..8d617cb86d8e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_m(
@@ -46,7 +46,7 @@ void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_m(
@@ -67,6 +67,6 @@ void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c
index b0cd06ff4e43..ec1c09032636 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c
@@ -24,7 +24,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_m(
@@ -45,7 +45,7 @@ void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_m(
@@ -66,7 +66,7 @@ void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_m(
@@ -87,7 +87,7 @@ void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_m(
@@ -108,7 +108,7 @@ void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_m(
@@ -150,7 +150,7 @@ void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_m(
@@ -171,6 +171,6 @@ void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+ return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c
index ae5fca61c059..344d95443a78 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_m(
@@ -50,7 +50,7 @@ void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_m(
@@ -73,7 +73,7 @@ void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_m(
@@ -96,7 +96,7 @@ void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_m(
@@ -119,7 +119,7 @@ void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_m(
@@ -142,7 +142,7 @@ void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_m(
@@ -165,7 +165,7 @@ void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_m(
@@ -188,7 +188,7 @@ void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_m(
@@ -211,6 +211,6 @@ void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c
index fc5e1da12557..9d4e608c37b3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_m(
@@ -50,7 +50,7 @@ void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_m(
@@ -73,7 +73,7 @@ void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_m(
@@ -96,7 +96,7 @@ void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_m(
@@ -119,7 +119,7 @@ void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_m(
@@ -142,6 +142,6 @@ void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c
index c994bb75e3b6..c64b1a080eb5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_m(
@@ -50,7 +50,7 @@ void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_m(
@@ -73,6 +73,6 @@ void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c
index 531337bff1ed..34828f4f5548 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c
@@ -26,7 +26,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_m(
@@ -49,7 +49,7 @@ void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_m(
@@ -72,7 +72,7 @@ void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_m(
@@ -95,7 +95,7 @@ void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_m(
@@ -118,7 +118,7 @@ void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_m(
@@ -141,7 +141,7 @@ void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_m(
@@ -164,7 +164,7 @@ void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_m(
@@ -187,6 +187,6 @@ void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+ return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c
index ad2a3de33d61..f8307089edcb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_m(
@@ -54,7 +54,7 @@ void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_m(
@@ -79,7 +79,7 @@ void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_m(
@@ -104,7 +104,7 @@ void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_m(
@@ -129,7 +129,7 @@ void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_m(
@@ -154,7 +154,7 @@ void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_m(
@@ -179,7 +179,7 @@ void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_m(
@@ -204,7 +204,7 @@ void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_m(
@@ -229,6 +229,6 @@ void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c
index 51917188996e..08880776ae5a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_m(
@@ -54,7 +54,7 @@ void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_m(
@@ -79,7 +79,7 @@ void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_m(
@@ -104,7 +104,7 @@ void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_m(
@@ -129,7 +129,7 @@ void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_m(
@@ -154,6 +154,6 @@ void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c
index d78f7f159c43..ad077165eb4c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_m(
@@ -54,7 +54,7 @@ void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_m(
@@ -79,6 +79,6 @@ void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c
index e238b8a22478..a4c858ad8b0f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c
@@ -28,7 +28,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_m(
@@ -53,7 +53,7 @@ void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_m(
@@ -78,7 +78,7 @@ void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_m(
@@ -103,7 +103,7 @@ void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_m(
@@ -128,7 +128,7 @@ void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_m(
@@ -153,7 +153,7 @@ void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_m(
@@ -178,7 +178,7 @@ void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_m(
@@ -203,6 +203,6 @@ void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+ return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei16.c
index 576ea928c54c..7c9337ff6314 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vluxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4(
@@ -157,7 +157,7 @@ vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2(
@@ -166,7 +166,7 @@ vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1(
@@ -175,7 +175,7 @@ vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2(
@@ -184,7 +184,7 @@ vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4(
@@ -193,7 +193,7 @@ vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4(
@@ -202,7 +202,7 @@ vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2(
@@ -211,7 +211,7 @@ vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1(
@@ -220,7 +220,7 @@ vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2(
@@ -229,7 +229,7 @@ vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4(
@@ -238,7 +238,7 @@ vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8(
@@ -247,7 +247,7 @@ vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2(
@@ -256,7 +256,7 @@ vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1(
@@ -265,7 +265,7 @@ vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2(
@@ -274,7 +274,7 @@ vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4(
@@ -283,7 +283,7 @@ vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8(
@@ -292,7 +292,7 @@ vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1(
@@ -301,7 +301,7 @@ vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2(
@@ -310,7 +310,7 @@ vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4(
@@ -319,7 +319,7 @@ vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8(
@@ -328,7 +328,7 @@ vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8(
@@ -337,7 +337,7 @@ vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4(
@@ -346,7 +346,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2(
@@ -355,7 +355,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1(
@@ -364,7 +364,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2(
@@ -373,7 +373,7 @@ vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4(
@@ -382,7 +382,7 @@ vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4(
@@ -391,7 +391,7 @@ vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2(
@@ -400,7 +400,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1(
@@ -409,7 +409,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2(
@@ -418,7 +418,7 @@ vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4(
@@ -427,7 +427,7 @@ vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8(
@@ -436,7 +436,7 @@ vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2(
@@ -445,7 +445,7 @@ vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1(
@@ -454,7 +454,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2(
@@ -463,7 +463,7 @@ vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4(
@@ -472,7 +472,7 @@ vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8(
@@ -481,7 +481,7 @@ vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1(
@@ -490,7 +490,7 @@ vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2(
@@ -499,7 +499,7 @@ vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4(
@@ -508,7 +508,7 @@ vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8(
@@ -517,7 +517,7 @@ vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(base, bindex, vl);
+ return __riscv_vluxei16(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m(
@@ -526,7 +526,7 @@ vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m(
@@ -535,7 +535,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m(
@@ -544,7 +544,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m(
@@ -553,7 +553,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m(
@@ -562,7 +562,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m(
@@ -571,7 +571,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m(
@@ -580,7 +580,7 @@ vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m(
@@ -589,7 +589,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m(
@@ -598,7 +598,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m(
@@ -607,7 +607,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m(
@@ -616,7 +616,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m(
@@ -625,7 +625,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m(
@@ -634,7 +634,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m(
@@ -643,7 +643,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m(
@@ -652,7 +652,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m(
@@ -661,7 +661,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m(
@@ -670,7 +670,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m(
@@ -679,7 +679,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m(
@@ -688,7 +688,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m(
@@ -697,7 +697,7 @@ vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m(
@@ -706,7 +706,7 @@ vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m(
@@ -715,7 +715,7 @@ vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m(
@@ -724,7 +724,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m(
@@ -733,7 +733,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m(
@@ -742,7 +742,7 @@ vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m(
@@ -751,7 +751,7 @@ vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m(
@@ -760,7 +760,7 @@ vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m(
@@ -769,7 +769,7 @@ vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m(
@@ -778,7 +778,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m(
@@ -787,7 +787,7 @@ vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m(
@@ -796,7 +796,7 @@ vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m(
@@ -805,7 +805,7 @@ vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m(
@@ -814,7 +814,7 @@ vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m(
@@ -823,7 +823,7 @@ vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m(
@@ -832,7 +832,7 @@ vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m(
@@ -841,7 +841,7 @@ vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m(
@@ -850,7 +850,7 @@ vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m(
@@ -859,7 +859,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m(
@@ -868,7 +868,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m(
@@ -877,7 +877,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m(
@@ -886,7 +886,7 @@ vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m(
@@ -895,7 +895,7 @@ vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m(
@@ -904,7 +904,7 @@ vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m(
@@ -913,7 +913,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m(
@@ -922,7 +922,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m(
@@ -931,7 +931,7 @@ vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m(
@@ -940,7 +940,7 @@ vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m(
@@ -949,7 +949,7 @@ vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m(
@@ -958,7 +958,7 @@ vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m(
@@ -967,7 +967,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m(
@@ -976,7 +976,7 @@ vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m(
@@ -985,7 +985,7 @@ vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m(
@@ -994,7 +994,7 @@ vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m(
@@ -1003,7 +1003,7 @@ vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m(
@@ -1012,7 +1012,7 @@ vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m(
@@ -1021,7 +1021,7 @@ vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m(
@@ -1030,6 +1030,6 @@ vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, base, bindex, vl);
+ return __riscv_vluxei16(mask, base, bindex, vl);
}
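For readers skimming the mechanical rename above: after this patch, user code spells the overloaded indexed loads with the `__riscv_` prefix, as in the minimal sketch below. The sketch is hypothetical (the function name, loop structure, and build flags are illustrative, not taken from the tests), but the `__riscv_vluxei32(base, bindex, vl)` call itself matches the overloaded signature exercised by the vluxei32.c tests that follow; the companion load/store and vsetvl intrinsics shown use their standard prefixed names.

// Hypothetical usage sketch of the renamed overloaded intrinsic.
// Build (illustrative): clang -march=rv64gcv -O2 gather.c
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// Gathers n floats from base using byte offsets in idx
// (vluxei* index elements are byte offsets per the RVV spec).
void gather_f32(const float *base, const uint32_t *idx, float *out, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);                 // elements this iteration
    vuint32m1_t bindex = __riscv_vle32_v_u32m1(idx + i, vl); // load byte offsets
    // Overloaded indexed load, now carrying the __riscv_ prefix:
    vfloat32m1_t v = __riscv_vluxei32(base, bindex, vl);
    __riscv_vse32_v_f32m1(out + i, v, vl);
    i += vl;
  }
}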
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei32.c
index fdba1967ef15..c4f4a6cee1f8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1(
@@ -67,7 +67,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4(
@@ -85,7 +85,7 @@ vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8(
@@ -94,7 +94,7 @@ vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4(
@@ -121,7 +121,7 @@ vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8(
@@ -130,7 +130,7 @@ vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8(
@@ -139,7 +139,7 @@ vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4(
@@ -148,7 +148,7 @@ vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2(
@@ -157,7 +157,7 @@ vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1(
@@ -166,7 +166,7 @@ vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2(
@@ -175,7 +175,7 @@ vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4(
@@ -184,7 +184,7 @@ vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2(
@@ -193,7 +193,7 @@ vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1(
@@ -202,7 +202,7 @@ vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2(
@@ -211,7 +211,7 @@ vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4(
@@ -220,7 +220,7 @@ vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2(
@@ -229,7 +229,7 @@ vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1(
@@ -238,7 +238,7 @@ vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2(
@@ -247,7 +247,7 @@ vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4(
@@ -256,7 +256,7 @@ vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8(
@@ -265,7 +265,7 @@ vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1(
@@ -274,7 +274,7 @@ vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2(
@@ -283,7 +283,7 @@ vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4(
@@ -292,7 +292,7 @@ vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8(
@@ -301,7 +301,7 @@ vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8(
@@ -310,7 +310,7 @@ vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4(
@@ -319,7 +319,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2(
@@ -328,7 +328,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1(
@@ -337,7 +337,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2(
@@ -346,7 +346,7 @@ vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4(
@@ -355,7 +355,7 @@ vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2(
@@ -364,7 +364,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1(
@@ -373,7 +373,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2(
@@ -382,7 +382,7 @@ vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4(
@@ -391,7 +391,7 @@ vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2(
@@ -400,7 +400,7 @@ vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1(
@@ -409,7 +409,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2(
@@ -418,7 +418,7 @@ vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4(
@@ -427,7 +427,7 @@ vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8(
@@ -436,7 +436,7 @@ vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1(
@@ -445,7 +445,7 @@ vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2(
@@ -454,7 +454,7 @@ vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4(
@@ -463,7 +463,7 @@ vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8(
@@ -472,7 +472,7 @@ vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(base, bindex, vl);
+ return __riscv_vluxei32(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m(
@@ -481,7 +481,7 @@ vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m(
@@ -490,7 +490,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m(
@@ -499,7 +499,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m(
@@ -508,7 +508,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m(
@@ -517,7 +517,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m(
@@ -526,7 +526,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m(
@@ -535,7 +535,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m(
@@ -544,7 +544,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m(
@@ -553,7 +553,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m(
@@ -562,7 +562,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m(
@@ -571,7 +571,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m(
@@ -580,7 +580,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m(
@@ -589,7 +589,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m(
@@ -598,7 +598,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m(
@@ -607,7 +607,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m(
@@ -616,7 +616,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m(
@@ -625,7 +625,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m(
@@ -634,7 +634,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m(
@@ -643,7 +643,7 @@ vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m(
@@ -652,7 +652,7 @@ vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m(
@@ -661,7 +661,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m(
@@ -670,7 +670,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m(
@@ -679,7 +679,7 @@ vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m(
@@ -688,7 +688,7 @@ vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m(
@@ -697,7 +697,7 @@ vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m(
@@ -706,7 +706,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m(
@@ -715,7 +715,7 @@ vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m(
@@ -724,7 +724,7 @@ vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m(
@@ -733,7 +733,7 @@ vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m(
@@ -742,7 +742,7 @@ vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m(
@@ -751,7 +751,7 @@ vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m(
@@ -760,7 +760,7 @@ vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m(
@@ -769,7 +769,7 @@ vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m(
@@ -778,7 +778,7 @@ vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m(
@@ -787,7 +787,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m(
@@ -796,7 +796,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m(
@@ -805,7 +805,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m(
@@ -814,7 +814,7 @@ vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m(
@@ -823,7 +823,7 @@ vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m(
@@ -832,7 +832,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m(
@@ -841,7 +841,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m(
@@ -850,7 +850,7 @@ vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m(
@@ -859,7 +859,7 @@ vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m(
@@ -868,7 +868,7 @@ vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m(
@@ -877,7 +877,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m(
@@ -886,7 +886,7 @@ vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m(
@@ -895,7 +895,7 @@ vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m(
@@ -904,7 +904,7 @@ vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m(
@@ -913,7 +913,7 @@ vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m(
@@ -922,7 +922,7 @@ vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m(
@@ -931,7 +931,7 @@ vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m(
@@ -940,6 +940,6 @@ vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, base, bindex, vl);
+ return __riscv_vluxei32(mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei64.c
index bb25cf1a941c..10ce1c387a3c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1(
@@ -58,7 +58,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4(
@@ -76,7 +76,7 @@ vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2(
@@ -94,7 +94,7 @@ vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4(
@@ -103,7 +103,7 @@ vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8(
@@ -112,7 +112,7 @@ vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8(
@@ -121,7 +121,7 @@ vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4(
@@ -130,7 +130,7 @@ vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2(
@@ -139,7 +139,7 @@ vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1(
@@ -148,7 +148,7 @@ vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4(
@@ -157,7 +157,7 @@ vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2(
@@ -166,7 +166,7 @@ vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1(
@@ -175,7 +175,7 @@ vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2(
@@ -184,7 +184,7 @@ vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2(
@@ -193,7 +193,7 @@ vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1(
@@ -202,7 +202,7 @@ vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2(
@@ -211,7 +211,7 @@ vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4(
@@ -220,7 +220,7 @@ vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1(
@@ -229,7 +229,7 @@ vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2(
@@ -238,7 +238,7 @@ vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4(
@@ -247,7 +247,7 @@ vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8(
@@ -256,7 +256,7 @@ vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8(
@@ -265,7 +265,7 @@ vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4(
@@ -274,7 +274,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2(
@@ -283,7 +283,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1(
@@ -292,7 +292,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4(
@@ -301,7 +301,7 @@ vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2(
@@ -310,7 +310,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1(
@@ -319,7 +319,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2(
@@ -328,7 +328,7 @@ vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2(
@@ -337,7 +337,7 @@ vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1(
@@ -346,7 +346,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2(
@@ -355,7 +355,7 @@ vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4(
@@ -364,7 +364,7 @@ vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1(
@@ -373,7 +373,7 @@ vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2(
@@ -382,7 +382,7 @@ vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4(
@@ -391,7 +391,7 @@ vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8(
@@ -400,7 +400,7 @@ vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(base, bindex, vl);
+ return __riscv_vluxei64(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m(
@@ -409,7 +409,7 @@ vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m(
@@ -418,7 +418,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m(
@@ -427,7 +427,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m(
@@ -436,7 +436,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m(
@@ -445,7 +445,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m(
@@ -454,7 +454,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m(
@@ -463,7 +463,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m(
@@ -472,7 +472,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m(
@@ -481,7 +481,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m(
@@ -499,7 +499,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m(
@@ -508,7 +508,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m(
@@ -517,7 +517,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m(
@@ -526,7 +526,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m(
@@ -535,7 +535,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m(
@@ -544,7 +544,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m(
@@ -553,7 +553,7 @@ vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m(
@@ -562,7 +562,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m(
@@ -571,7 +571,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m(
@@ -580,7 +580,7 @@ vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m(
@@ -589,7 +589,7 @@ vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m(
@@ -598,7 +598,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m(
@@ -607,7 +607,7 @@ vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m(
@@ -616,7 +616,7 @@ vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m(
@@ -625,7 +625,7 @@ vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m(
@@ -634,7 +634,7 @@ vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m(
@@ -643,7 +643,7 @@ vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m(
@@ -652,7 +652,7 @@ vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m(
@@ -661,7 +661,7 @@ vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m(
@@ -670,7 +670,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m(
@@ -679,7 +679,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m(
@@ -688,7 +688,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m(
@@ -697,7 +697,7 @@ vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m(
@@ -706,7 +706,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m(
@@ -715,7 +715,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m(
@@ -724,7 +724,7 @@ vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m(
@@ -733,7 +733,7 @@ vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m(
@@ -742,7 +742,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m(
@@ -751,7 +751,7 @@ vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m(
@@ -760,7 +760,7 @@ vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m(
@@ -769,7 +769,7 @@ vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m(
@@ -778,7 +778,7 @@ vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m(
@@ -787,7 +787,7 @@ vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m(
@@ -796,6 +796,6 @@ vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, base, bindex, vl);
+ return __riscv_vluxei64(mask, base, bindex, vl);
}
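Likewise for the unmasked overload, a sketch under the same assumptions as above (the helper name gather_f32m1 is illustrative), mirroring test_vluxei64_v_f32m1:

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical helper mirroring test_vluxei64_v_f32m1: unmasked overload,
// gathering 32-bit floats through 64-bit indices; only the __riscv_ prefix
// on the call is new.
vfloat32m1_t gather_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
  return __riscv_vluxei64(base, bindex, vl);
}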
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei8.c
index 68d5be1561ad..c59e71cba8f3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, si
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vluxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4(
@@ -157,7 +157,7 @@ vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2(
@@ -166,7 +166,7 @@ vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1(
@@ -175,7 +175,7 @@ vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2(
@@ -184,7 +184,7 @@ vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4(
@@ -193,7 +193,7 @@ vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8(
@@ -202,7 +202,7 @@ vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4(
@@ -211,7 +211,7 @@ vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2(
@@ -220,7 +220,7 @@ vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1(
@@ -229,7 +229,7 @@ vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2(
@@ -238,7 +238,7 @@ vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4(
@@ -247,7 +247,7 @@ vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8(
@@ -256,7 +256,7 @@ vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2(
@@ -265,7 +265,7 @@ vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1(
@@ -274,7 +274,7 @@ vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4(
@@ -292,7 +292,7 @@ vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8(
@@ -301,7 +301,7 @@ vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1(
@@ -310,7 +310,7 @@ vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2(
@@ -319,7 +319,7 @@ vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4(
@@ -328,7 +328,7 @@ vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8(
@@ -337,7 +337,7 @@ vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8(
@@ -346,7 +346,7 @@ vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2(
@@ -382,7 +382,7 @@ vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4(
@@ -391,7 +391,7 @@ vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8(
@@ -400,7 +400,7 @@ vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4(
@@ -409,7 +409,7 @@ vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2(
@@ -436,7 +436,7 @@ vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4(
@@ -445,7 +445,7 @@ vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8(
@@ -454,7 +454,7 @@ vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2(
@@ -463,7 +463,7 @@ vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2(
@@ -481,7 +481,7 @@ vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4(
@@ -490,7 +490,7 @@ vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8(
@@ -499,7 +499,7 @@ vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1(
@@ -508,7 +508,7 @@ vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2(
@@ -517,7 +517,7 @@ vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4(
@@ -526,7 +526,7 @@ vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8(
@@ -535,7 +535,7 @@ vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(base, bindex, vl);
+ return __riscv_vluxei8(base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m(
@@ -544,7 +544,7 @@ vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m(
@@ -553,7 +553,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m(
@@ -562,7 +562,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m(
@@ -571,7 +571,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m(
@@ -580,7 +580,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m(
@@ -598,7 +598,7 @@ vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m(
@@ -607,7 +607,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m(
@@ -616,7 +616,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m(
@@ -625,7 +625,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m(
@@ -634,7 +634,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m(
@@ -643,7 +643,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m(
@@ -652,7 +652,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m(
@@ -661,7 +661,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m(
@@ -670,7 +670,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m(
@@ -679,7 +679,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m(
@@ -688,7 +688,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m(
@@ -697,7 +697,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m(
@@ -706,7 +706,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m(
@@ -715,7 +715,7 @@ vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m(
@@ -724,7 +724,7 @@ vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m(
@@ -733,7 +733,7 @@ vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bi
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m(
@@ -742,7 +742,7 @@ vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m(
@@ -751,7 +751,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m(
@@ -760,7 +760,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m(
@@ -769,7 +769,7 @@ vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m(
@@ -778,7 +778,7 @@ vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m(
@@ -787,7 +787,7 @@ vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m(
@@ -796,7 +796,7 @@ vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m(
@@ -805,7 +805,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m(
@@ -814,7 +814,7 @@ vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m(
@@ -823,7 +823,7 @@ vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m(
@@ -832,7 +832,7 @@ vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m(
@@ -841,7 +841,7 @@ vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m(
@@ -850,7 +850,7 @@ vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m(
@@ -859,7 +859,7 @@ vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m(
@@ -868,7 +868,7 @@ vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m(
@@ -877,7 +877,7 @@ vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m(
@@ -886,7 +886,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m(
@@ -895,7 +895,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m(
@@ -904,7 +904,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m(
@@ -913,7 +913,7 @@ vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m(
@@ -922,7 +922,7 @@ vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m(
@@ -931,7 +931,7 @@ vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m(
@@ -940,7 +940,7 @@ vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m(
@@ -967,7 +967,7 @@ vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m(
@@ -976,7 +976,7 @@ vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m(
@@ -985,7 +985,7 @@ vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m(
@@ -994,7 +994,7 @@ vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m(
@@ -1003,7 +1003,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m(
@@ -1012,7 +1012,7 @@ vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m(
@@ -1021,7 +1021,7 @@ vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m(
@@ -1030,7 +1030,7 @@ vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m(
@@ -1039,7 +1039,7 @@ vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m(
@@ -1048,7 +1048,7 @@ vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m(
@@ -1057,7 +1057,7 @@ vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m(
@@ -1066,6 +1066,6 @@ vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, base, bindex, vl);
+ return __riscv_vluxei8(mask, base, bindex, vl);
}
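Editor's note: every hunk above is the same mechanical rename of the overloaded indexed-load intrinsic, vluxei8 -> __riscv_vluxei8, with the argument lists unchanged. A minimal caller-side sketch of what the rename means for user code, mirroring the masked i8m1 test signature above; gather_bytes is a hypothetical helper, not part of this patch, and assumes compilation with -march=rv64gcv against <riscv_vector.h>:

#include <riscv_vector.h>

// Gathers vl bytes from base at the byte offsets in bindex, for lanes
// where mask is set. Before this patch the overloaded call was spelled
// vluxei8(...); after it, the __riscv_ prefix is required.
vint8m1_t gather_bytes(vbool8_t mask, const int8_t *base,
                       vuint8m1_t bindex, size_t vl) {
  return __riscv_vluxei8(mask, base, bindex, vl);
}

The overload is resolved from the argument types, so only the spelling of the call changes, never its arity or operand order.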
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c
index ba4485954b7d..47099e7d0ae0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2(
@@ -30,7 +30,7 @@ void test_vluxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1(
@@ -43,7 +43,7 @@ void test_vluxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2(
@@ -56,7 +56,7 @@ void test_vluxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4(
@@ -69,7 +69,7 @@ void test_vluxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2(
@@ -82,7 +82,7 @@ void test_vluxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1(
@@ -95,7 +95,7 @@ void test_vluxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2(
@@ -108,7 +108,7 @@ void test_vluxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4(
@@ -121,7 +121,7 @@ void test_vluxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1(
@@ -134,7 +134,7 @@ void test_vluxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2(
@@ -147,7 +147,7 @@ void test_vluxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4(
@@ -160,7 +160,7 @@ void test_vluxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8(
@@ -173,7 +173,7 @@ void test_vluxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4(
@@ -186,7 +186,7 @@ void test_vluxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2(
@@ -199,7 +199,7 @@ void test_vluxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1(
@@ -212,7 +212,7 @@ void test_vluxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2(
@@ -225,7 +225,7 @@ void test_vluxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4(
@@ -238,7 +238,7 @@ void test_vluxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4(
@@ -251,7 +251,7 @@ void test_vluxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2(
@@ -264,7 +264,7 @@ void test_vluxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1(
@@ -277,7 +277,7 @@ void test_vluxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2(
@@ -290,7 +290,7 @@ void test_vluxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4(
@@ -303,7 +303,7 @@ void test_vluxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2(
@@ -316,7 +316,7 @@ void test_vluxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1(
@@ -329,7 +329,7 @@ void test_vluxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2(
@@ -342,7 +342,7 @@ void test_vluxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4(
@@ -355,7 +355,7 @@ void test_vluxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1(
@@ -368,7 +368,7 @@ void test_vluxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2(
@@ -381,7 +381,7 @@ void test_vluxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4(
@@ -394,7 +394,7 @@ void test_vluxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8(
@@ -407,7 +407,7 @@ void test_vluxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4(
@@ -420,7 +420,7 @@ void test_vluxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2(
@@ -433,7 +433,7 @@ void test_vluxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1(
@@ -446,7 +446,7 @@ void test_vluxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2(
@@ -459,7 +459,7 @@ void test_vluxseg2ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4(
@@ -472,7 +472,7 @@ void test_vluxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4(
@@ -485,7 +485,7 @@ void test_vluxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2(
@@ -498,7 +498,7 @@ void test_vluxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1(
@@ -511,7 +511,7 @@ void test_vluxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2(
@@ -524,7 +524,7 @@ void test_vluxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4(
@@ -537,7 +537,7 @@ void test_vluxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2(
@@ -550,7 +550,7 @@ void test_vluxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1(
@@ -563,7 +563,7 @@ void test_vluxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2(
@@ -576,7 +576,7 @@ void test_vluxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4(
@@ -589,7 +589,7 @@ void test_vluxseg2ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1(
@@ -602,7 +602,7 @@ void test_vluxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2(
@@ -615,7 +615,7 @@ void test_vluxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4(
@@ -628,7 +628,7 @@ void test_vluxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, base, bindex, vl);
}
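Editor's note: the masked segment-load tests that follow use the same overloaded shape as the unmasked ones above, with the two destination vectors passed by pointer and the mask inserted before the base pointer. A minimal sketch mirroring the f32m1 masked test signature below; gather_pairs is a hypothetical helper, not part of this patch:

#include <riscv_vector.h>

// Loads vl two-field segments ({re, im} pairs) from base at the byte
// offsets in bindex, for lanes where mask is set, writing the fields
// through re and im. Previously spelled vluxseg2ei16(...).
void gather_pairs(vfloat32m1_t *re, vfloat32m1_t *im, vbool32_t mask,
                  const float *base, vuint16mf2_t bindex, size_t vl) {
  __riscv_vluxseg2ei16(re, im, mask, base, bindex, vl);
}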
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m(
@@ -641,7 +641,7 @@ void test_vluxseg2ei16_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_m(
@@ -654,7 +654,7 @@ void test_vluxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m(
@@ -667,7 +667,7 @@ void test_vluxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m(
@@ -680,7 +680,7 @@ void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m(
@@ -693,7 +693,7 @@ void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m(
@@ -706,7 +706,7 @@ void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m(
@@ -719,7 +719,7 @@ void test_vluxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_m(
@@ -732,7 +732,7 @@ void test_vluxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_m(
@@ -745,7 +745,7 @@ void test_vluxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_m(
@@ -758,7 +758,7 @@ void test_vluxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_m(
@@ -771,7 +771,7 @@ void test_vluxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_m(
@@ -784,7 +784,7 @@ void test_vluxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m(
@@ -797,7 +797,7 @@ void test_vluxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m(
@@ -810,7 +810,7 @@ void test_vluxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m(
@@ -823,7 +823,7 @@ void test_vluxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m(
@@ -836,7 +836,7 @@ void test_vluxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_m(
@@ -849,7 +849,7 @@ void test_vluxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_m(
@@ -862,7 +862,7 @@ void test_vluxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m(
@@ -875,7 +875,7 @@ void test_vluxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m(
@@ -888,7 +888,7 @@ void test_vluxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vluxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_m(
@@ -914,7 +914,7 @@ void test_vluxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_m(
@@ -927,7 +927,7 @@ void test_vluxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m(
@@ -940,7 +940,7 @@ void test_vluxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m(
@@ -953,7 +953,7 @@ void test_vluxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_m(
@@ -966,7 +966,7 @@ void test_vluxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m(
@@ -979,7 +979,7 @@ void test_vluxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_m(
@@ -992,7 +992,7 @@ void test_vluxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_m(
@@ -1005,7 +1005,7 @@ void test_vluxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_m(
@@ -1018,7 +1018,7 @@ void test_vluxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m(
@@ -1031,7 +1031,7 @@ void test_vluxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m(
@@ -1044,7 +1044,7 @@ void test_vluxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m(
@@ -1057,7 +1057,7 @@ void test_vluxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m(
@@ -1070,7 +1070,7 @@ void test_vluxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_m(
@@ -1083,7 +1083,7 @@ void test_vluxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_m(
@@ -1096,7 +1096,7 @@ void test_vluxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m(
@@ -1109,7 +1109,7 @@ void test_vluxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m(
@@ -1122,7 +1122,7 @@ void test_vluxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m(
@@ -1135,7 +1135,7 @@ void test_vluxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_m(
@@ -1148,7 +1148,7 @@ void test_vluxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_m(
@@ -1161,7 +1161,7 @@ void test_vluxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m(
@@ -1174,7 +1174,7 @@ void test_vluxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m(
@@ -1187,7 +1187,7 @@ void test_vluxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_m(
@@ -1200,7 +1200,7 @@ void test_vluxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_m(
@@ -1213,7 +1213,7 @@ void test_vluxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_m(
@@ -1226,7 +1226,7 @@ void test_vluxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_m(
@@ -1239,7 +1239,7 @@ void test_vluxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_m(
@@ -1252,6 +1252,6 @@ void test_vluxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
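
/*
 * A minimal, hypothetical call site illustrating the rename applied in the
 * vluxseg2ei16.c hunks above; the wrapper name gather_i32m2_m is an
 * illustrative assumption, not part of the patch. The signature mirrors
 * test_vluxseg2ei16_v_i32m2_m and would be built against a vector-enabled
 * target, e.g. clang -march=rv64gcv.
 */
#include <riscv_vector.h>

void gather_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
                    const int32_t *base, vuint16m1_t bindex, size_t vl) {
  // Before this patch the overloaded spelling was vluxseg2ei16(...).
  // The argument types still select the overload; only the prefix changes.
  __riscv_vluxseg2ei16(v0, v1, mask, base, bindex, vl);
}
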
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c
index 4c0c17c97556..f5540e656dbd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2(
@@ -30,7 +30,7 @@ void test_vluxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1(
@@ -43,7 +43,7 @@ void test_vluxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2(
@@ -56,7 +56,7 @@ void test_vluxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4(
@@ -69,7 +69,7 @@ void test_vluxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2(
@@ -82,7 +82,7 @@ void test_vluxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1(
@@ -95,7 +95,7 @@ void test_vluxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2(
@@ -108,7 +108,7 @@ void test_vluxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4(
@@ -121,7 +121,7 @@ void test_vluxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1(
@@ -134,7 +134,7 @@ void test_vluxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2(
@@ -147,7 +147,7 @@ void test_vluxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4(
@@ -160,7 +160,7 @@ void test_vluxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8(
@@ -173,7 +173,7 @@ void test_vluxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4(
@@ -186,7 +186,7 @@ void test_vluxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2(
@@ -199,7 +199,7 @@ void test_vluxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1(
@@ -212,7 +212,7 @@ void test_vluxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2(
@@ -225,7 +225,7 @@ void test_vluxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4(
@@ -238,7 +238,7 @@ void test_vluxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2(
@@ -251,7 +251,7 @@ void test_vluxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1(
@@ -264,7 +264,7 @@ void test_vluxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2(
@@ -277,7 +277,7 @@ void test_vluxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4(
@@ -290,7 +290,7 @@ void test_vluxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2(
@@ -303,7 +303,7 @@ void test_vluxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1(
@@ -316,7 +316,7 @@ void test_vluxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2(
@@ -329,7 +329,7 @@ void test_vluxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4(
@@ -342,7 +342,7 @@ void test_vluxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1(
@@ -355,7 +355,7 @@ void test_vluxseg2ei32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2(
@@ -368,7 +368,7 @@ void test_vluxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4(
@@ -381,7 +381,7 @@ void test_vluxseg2ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8(
@@ -394,7 +394,7 @@ void test_vluxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4(
@@ -407,7 +407,7 @@ void test_vluxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2(
@@ -420,7 +420,7 @@ void test_vluxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1(
@@ -433,7 +433,7 @@ void test_vluxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2(
@@ -446,7 +446,7 @@ void test_vluxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4(
@@ -459,7 +459,7 @@ void test_vluxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2(
@@ -472,7 +472,7 @@ void test_vluxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1(
@@ -485,7 +485,7 @@ void test_vluxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2(
@@ -498,7 +498,7 @@ void test_vluxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4(
@@ -511,7 +511,7 @@ void test_vluxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2(
@@ -524,7 +524,7 @@ void test_vluxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1(
@@ -537,7 +537,7 @@ void test_vluxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2(
@@ -550,7 +550,7 @@ void test_vluxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4(
@@ -563,7 +563,7 @@ void test_vluxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1(
@@ -576,7 +576,7 @@ void test_vluxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2(
@@ -589,7 +589,7 @@ void test_vluxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4(
@@ -602,7 +602,7 @@ void test_vluxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m(
@@ -615,7 +615,7 @@ void test_vluxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_m(
@@ -628,7 +628,7 @@ void test_vluxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m(
@@ -641,7 +641,7 @@ void test_vluxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m(
@@ -654,7 +654,7 @@ void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m(
@@ -667,7 +667,7 @@ void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m(
@@ -680,7 +680,7 @@ void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m(
@@ -693,7 +693,7 @@ void test_vluxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_m(
@@ -706,7 +706,7 @@ void test_vluxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_m(
@@ -719,7 +719,7 @@ void test_vluxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_m(
@@ -732,7 +732,7 @@ void test_vluxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_m(
@@ -745,7 +745,7 @@ void test_vluxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_m(
@@ -758,7 +758,7 @@ void test_vluxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m(
@@ -771,7 +771,7 @@ void test_vluxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m(
@@ -784,7 +784,7 @@ void test_vluxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m(
@@ -797,7 +797,7 @@ void test_vluxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m(
@@ -810,7 +810,7 @@ void test_vluxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_m(
@@ -823,7 +823,7 @@ void test_vluxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m(
@@ -836,7 +836,7 @@ void test_vluxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m(
@@ -849,7 +849,7 @@ void test_vluxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m(
@@ -862,7 +862,7 @@ void test_vluxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_m(
@@ -875,7 +875,7 @@ void test_vluxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_m(
@@ -888,7 +888,7 @@ void test_vluxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m(
@@ -901,7 +901,7 @@ void test_vluxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m(
@@ -914,7 +914,7 @@ void test_vluxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_m(
@@ -927,7 +927,7 @@ void test_vluxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_m(
@@ -940,7 +940,7 @@ void test_vluxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_m(
@@ -953,7 +953,7 @@ void test_vluxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_m(
@@ -966,7 +966,7 @@ void test_vluxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_m(
@@ -979,7 +979,7 @@ void test_vluxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m(
@@ -992,7 +992,7 @@ void test_vluxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m(
@@ -1005,7 +1005,7 @@ void test_vluxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m(
@@ -1018,7 +1018,7 @@ void test_vluxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m(
@@ -1031,7 +1031,7 @@ void test_vluxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m(
@@ -1044,7 +1044,7 @@ void test_vluxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m(
@@ -1057,7 +1057,7 @@ void test_vluxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m(
@@ -1070,7 +1070,7 @@ void test_vluxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m(
@@ -1083,7 +1083,7 @@ void test_vluxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_m(
@@ -1096,7 +1096,7 @@ void test_vluxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_m(
@@ -1109,7 +1109,7 @@ void test_vluxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m(
@@ -1122,7 +1122,7 @@ void test_vluxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m(
@@ -1135,7 +1135,7 @@ void test_vluxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_m(
@@ -1148,7 +1148,7 @@ void test_vluxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_m(
@@ -1161,7 +1161,7 @@ void test_vluxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_m(
@@ -1174,7 +1174,7 @@ void test_vluxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_m(
@@ -1187,7 +1187,7 @@ void test_vluxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_m(
@@ -1200,6 +1200,6 @@ void test_vluxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei32(v0, v1, mask, base, bindex, vl);
}
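
/*
 * The same rename applied to the 32-bit-index form, again as a hypothetical
 * sketch: the wrapper name gather_f16mf4 is an assumption, and the _Float16
 * overloads additionally assume the zvfh extension is available. The
 * signature mirrors test_vluxseg2ei32_v_f16mf4 above.
 */
#include <riscv_vector.h>

void gather_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1,
                   const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
  // vuint32mf2_t supplies 32-bit indices for vfloat16mf4_t data: the ei32
  // suffix fixes the index element width, and the index LMUL is chosen so
  // data and index vectors hold the same number of elements.
  __riscv_vluxseg2ei32(v0, v1, base, bindex, vl);
}
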
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c
index b734849adef6..a4004ea4ae44 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2(
@@ -30,7 +30,7 @@ void test_vluxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1(
@@ -43,7 +43,7 @@ void test_vluxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2(
@@ -56,7 +56,7 @@ void test_vluxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2(
@@ -69,7 +69,7 @@ void test_vluxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1(
@@ -82,7 +82,7 @@ void test_vluxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2(
@@ -95,7 +95,7 @@ void test_vluxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4(
@@ -108,7 +108,7 @@ void test_vluxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1(
@@ -121,7 +121,7 @@ void test_vluxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2(
@@ -134,7 +134,7 @@ void test_vluxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4(
@@ -147,7 +147,7 @@ void test_vluxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8(
@@ -160,7 +160,7 @@ void test_vluxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4(
@@ -173,7 +173,7 @@ void test_vluxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2(
@@ -186,7 +186,7 @@ void test_vluxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1(
@@ -199,7 +199,7 @@ void test_vluxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4(
@@ -212,7 +212,7 @@ void test_vluxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2(
@@ -225,7 +225,7 @@ void test_vluxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1(
@@ -238,7 +238,7 @@ void test_vluxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2(
@@ -251,7 +251,7 @@ void test_vluxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2(
@@ -264,7 +264,7 @@ void test_vluxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1(
@@ -277,7 +277,7 @@ void test_vluxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2(
@@ -290,7 +290,7 @@ void test_vluxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4(
@@ -303,7 +303,7 @@ void test_vluxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1(
@@ -316,7 +316,7 @@ void test_vluxseg2ei64_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2(
@@ -329,7 +329,7 @@ void test_vluxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4(
@@ -342,7 +342,7 @@ void test_vluxseg2ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8(
@@ -355,7 +355,7 @@ void test_vluxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4(
@@ -368,7 +368,7 @@ void test_vluxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2(
@@ -381,7 +381,7 @@ void test_vluxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1(
@@ -394,7 +394,7 @@ void test_vluxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4(
@@ -407,7 +407,7 @@ void test_vluxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2(
@@ -420,7 +420,7 @@ void test_vluxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1(
@@ -433,7 +433,7 @@ void test_vluxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2(
@@ -446,7 +446,7 @@ void test_vluxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2(
@@ -459,7 +459,7 @@ void test_vluxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1(
@@ -472,7 +472,7 @@ void test_vluxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2(
@@ -485,7 +485,7 @@ void test_vluxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4(
@@ -498,7 +498,7 @@ void test_vluxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1(
@@ -511,7 +511,7 @@ void test_vluxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2(
@@ -524,7 +524,7 @@ void test_vluxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4(
@@ -537,7 +537,7 @@ void test_vluxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m(
@@ -550,7 +550,7 @@ void test_vluxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_m(
@@ -563,7 +563,7 @@ void test_vluxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m(
@@ -576,7 +576,7 @@ void test_vluxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m(
@@ -589,7 +589,7 @@ void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m(
@@ -602,7 +602,7 @@ void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m(
@@ -615,7 +615,7 @@ void test_vluxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_m(
@@ -628,7 +628,7 @@ void test_vluxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_m(
@@ -641,7 +641,7 @@ void test_vluxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_m(
@@ -654,7 +654,7 @@ void test_vluxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_m(
@@ -667,7 +667,7 @@ void test_vluxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_m(
@@ -680,7 +680,7 @@ void test_vluxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m(
@@ -693,7 +693,7 @@ void test_vluxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m(
@@ -706,7 +706,7 @@ void test_vluxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m(
@@ -719,7 +719,7 @@ void test_vluxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m(
@@ -732,7 +732,7 @@ void test_vluxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m(
@@ -745,7 +745,7 @@ void test_vluxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m(
@@ -758,7 +758,7 @@ void test_vluxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m(
@@ -771,7 +771,7 @@ void test_vluxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_m(
@@ -784,7 +784,7 @@ void test_vluxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m(
@@ -797,7 +797,7 @@ void test_vluxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m(
@@ -810,7 +810,7 @@ void test_vluxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_m(
@@ -823,7 +823,7 @@ void test_vluxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_m(
@@ -836,7 +836,7 @@ void test_vluxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_m(
@@ -849,7 +849,7 @@ void test_vluxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_m(
@@ -862,7 +862,7 @@ void test_vluxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_m(
@@ -875,7 +875,7 @@ void test_vluxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m(
@@ -888,7 +888,7 @@ void test_vluxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m(
@@ -901,7 +901,7 @@ void test_vluxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m(
@@ -914,7 +914,7 @@ void test_vluxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m(
@@ -927,7 +927,7 @@ void test_vluxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m(
@@ -940,7 +940,7 @@ void test_vluxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m(
@@ -953,7 +953,7 @@ void test_vluxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m(
@@ -966,7 +966,7 @@ void test_vluxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_m(
@@ -979,7 +979,7 @@ void test_vluxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m(
@@ -992,7 +992,7 @@ void test_vluxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m(
@@ -1005,7 +1005,7 @@ void test_vluxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_m(
@@ -1018,7 +1018,7 @@ void test_vluxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_m(
@@ -1031,7 +1031,7 @@ void test_vluxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_m(
@@ -1044,7 +1044,7 @@ void test_vluxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_m(
@@ -1057,7 +1057,7 @@ void test_vluxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m(
@@ -1070,6 +1070,6 @@ void test_vluxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei64(v0, v1, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c
index 22b0f2798125..50097fc02820 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2(
@@ -30,7 +30,7 @@ void test_vluxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Floa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1(
@@ -43,7 +43,7 @@ void test_vluxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Floa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2(
@@ -56,7 +56,7 @@ void test_vluxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4(
@@ -69,7 +69,7 @@ void test_vluxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2(
@@ -82,7 +82,7 @@ void test_vluxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1(
@@ -95,7 +95,7 @@ void test_vluxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2(
@@ -108,7 +108,7 @@ void test_vluxseg2ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *b
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4(
@@ -121,7 +121,7 @@ void test_vluxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *b
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1(
@@ -134,7 +134,7 @@ void test_vluxseg2ei8_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *b
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2(
@@ -147,7 +147,7 @@ void test_vluxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4(
@@ -160,7 +160,7 @@ void test_vluxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8(
@@ -173,7 +173,7 @@ void test_vluxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4(
@@ -186,7 +186,7 @@ void test_vluxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2(
@@ -199,7 +199,7 @@ void test_vluxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1(
@@ -212,7 +212,7 @@ void test_vluxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2(
@@ -225,7 +225,7 @@ void test_vluxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4(
@@ -238,7 +238,7 @@ void test_vluxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4(
@@ -251,7 +251,7 @@ void test_vluxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2(
@@ -264,7 +264,7 @@ void test_vluxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1(
@@ -277,7 +277,7 @@ void test_vluxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2(
@@ -290,7 +290,7 @@ void test_vluxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4(
@@ -303,7 +303,7 @@ void test_vluxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2(
@@ -316,7 +316,7 @@ void test_vluxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1(
@@ -329,7 +329,7 @@ void test_vluxseg2ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2(
@@ -342,7 +342,7 @@ void test_vluxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4(
@@ -355,7 +355,7 @@ void test_vluxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1(
@@ -368,7 +368,7 @@ void test_vluxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2(
@@ -381,7 +381,7 @@ void test_vluxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4(
@@ -394,7 +394,7 @@ void test_vluxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8(
@@ -407,7 +407,7 @@ void test_vluxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *bas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4(
@@ -420,7 +420,7 @@ void test_vluxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *b
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2(
@@ -433,7 +433,7 @@ void test_vluxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *b
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1(
@@ -446,7 +446,7 @@ void test_vluxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *b
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2(
@@ -459,7 +459,7 @@ void test_vluxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4(
@@ -472,7 +472,7 @@ void test_vluxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4(
@@ -485,7 +485,7 @@ void test_vluxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2(
@@ -498,7 +498,7 @@ void test_vluxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1(
@@ -511,7 +511,7 @@ void test_vluxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2(
@@ -524,7 +524,7 @@ void test_vluxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4(
@@ -537,7 +537,7 @@ void test_vluxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2(
@@ -550,7 +550,7 @@ void test_vluxseg2ei8_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1(
@@ -563,7 +563,7 @@ void test_vluxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2(
@@ -576,7 +576,7 @@ void test_vluxseg2ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4(
@@ -589,7 +589,7 @@ void test_vluxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1(
@@ -602,7 +602,7 @@ void test_vluxseg2ei8_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2(
@@ -615,7 +615,7 @@ void test_vluxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4(
@@ -628,7 +628,7 @@ void test_vluxseg2ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m(
@@ -641,7 +641,7 @@ void test_vluxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_m(
@@ -654,7 +654,7 @@ void test_vluxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m(
@@ -667,7 +667,7 @@ void test_vluxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m(
@@ -680,7 +680,7 @@ void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m(
@@ -693,7 +693,7 @@ void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m(
@@ -706,7 +706,7 @@ void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m(
@@ -719,7 +719,7 @@ void test_vluxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_m(
@@ -732,7 +732,7 @@ void test_vluxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_m(
@@ -745,7 +745,7 @@ void test_vluxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_m(
@@ -758,7 +758,7 @@ void test_vluxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_m(
@@ -771,7 +771,7 @@ void test_vluxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_m(
@@ -784,7 +784,7 @@ void test_vluxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m(
@@ -797,7 +797,7 @@ void test_vluxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m(
@@ -810,7 +810,7 @@ void test_vluxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m(
@@ -823,7 +823,7 @@ void test_vluxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m(
@@ -836,7 +836,7 @@ void test_vluxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_m(
@@ -849,7 +849,7 @@ void test_vluxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, cons
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_m(
@@ -862,7 +862,7 @@ void test_vluxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, cons
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m(
@@ -875,7 +875,7 @@ void test_vluxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, cons
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m(
@@ -888,7 +888,7 @@ void test_vluxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vluxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_m(
@@ -914,7 +914,7 @@ void test_vluxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_m(
@@ -927,7 +927,7 @@ void test_vluxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m(
@@ -940,7 +940,7 @@ void test_vluxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m(
@@ -953,7 +953,7 @@ void test_vluxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_m(
@@ -966,7 +966,7 @@ void test_vluxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_m(
@@ -979,7 +979,7 @@ void test_vluxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_m(
@@ -992,7 +992,7 @@ void test_vluxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_m(
@@ -1005,7 +1005,7 @@ void test_vluxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_m(
@@ -1018,7 +1018,7 @@ void test_vluxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m(
@@ -1031,7 +1031,7 @@ void test_vluxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m(
@@ -1044,7 +1044,7 @@ void test_vluxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m(
@@ -1057,7 +1057,7 @@ void test_vluxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m(
@@ -1070,7 +1070,7 @@ void test_vluxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_m(
@@ -1083,7 +1083,7 @@ void test_vluxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m(
@@ -1096,7 +1096,7 @@ void test_vluxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m(
@@ -1109,7 +1109,7 @@ void test_vluxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m(
@@ -1122,7 +1122,7 @@ void test_vluxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m(
@@ -1135,7 +1135,7 @@ void test_vluxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_m(
@@ -1148,7 +1148,7 @@ void test_vluxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m(
@@ -1161,7 +1161,7 @@ void test_vluxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m(
@@ -1174,7 +1174,7 @@ void test_vluxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m(
@@ -1187,7 +1187,7 @@ void test_vluxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_m(
@@ -1200,7 +1200,7 @@ void test_vluxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m(
@@ -1213,7 +1213,7 @@ void test_vluxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_m(
@@ -1226,7 +1226,7 @@ void test_vluxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_m(
@@ -1239,7 +1239,7 @@ void test_vluxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_m(
@@ -1252,6 +1252,6 @@ void test_vluxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, base, bindex, vl);
+ return __riscv_vluxseg2ei8(v0, v1, mask, base, bindex, vl);
}
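// ---------------------------------------------------------------------------
// A minimal usage sketch (not part of the patch) of the overloaded intrinsic
// renamed above. The helper `gather_pairs`, its parameters, and the
// byte-offset scheme are illustrative assumptions; only the intrinsic calls
// mirror the pointer-output, non-policy interface these autogenerated tests
// exercise (e32/m1 data with an ei8 index pairs vint32m1_t with vuint8mf4_t,
// as in test_vluxseg2ei8_v_i32m1 above).
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Deinterleave {x, y} int32 pairs selected by 8-bit byte offsets: for each
// element i, the segment load reads base + offsets[i] and writes field 0 of
// the pair into v0 and field 1 into v1. (A uint8_t byte offset can only
// reach 255 bytes past base, so this sketch suits small lookup windows.)
static void gather_pairs(int32_t *xs, int32_t *ys, const int32_t *base,
                         const uint8_t *offsets, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);          // elements this strip
    vuint8mf4_t bindex = __riscv_vle8_v_u8mf4(offsets + i, vl);
    vint32m1_t v0, v1;
    __riscv_vluxseg2ei8(&v0, &v1, base, bindex, vl);  // renamed overload
    __riscv_vse32(xs + i, v0, vl);                    // overloaded stores
    __riscv_vse32(ys + i, v1, vl);
    i += vl;
  }
}
// ---------------------------------------------------------------------------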
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c
index 6a3affe2ea0f..1286dee03abe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2(
@@ -34,7 +34,7 @@ void test_vluxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1(
@@ -49,7 +49,7 @@ void test_vluxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2(
@@ -64,7 +64,7 @@ void test_vluxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2(
@@ -79,7 +79,7 @@ void test_vluxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1(
@@ -94,7 +94,7 @@ void test_vluxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2(
@@ -109,7 +109,7 @@ void test_vluxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1(
@@ -124,7 +124,7 @@ void test_vluxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2(
@@ -139,7 +139,7 @@ void test_vluxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8(
@@ -154,7 +154,7 @@ void test_vluxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4(
@@ -169,7 +169,7 @@ void test_vluxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2(
@@ -184,7 +184,7 @@ void test_vluxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1(
@@ -199,7 +199,7 @@ void test_vluxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2(
@@ -214,7 +214,7 @@ void test_vluxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4(
@@ -229,7 +229,7 @@ void test_vluxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2(
@@ -244,7 +244,7 @@ void test_vluxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1(
@@ -259,7 +259,7 @@ void test_vluxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2(
@@ -274,7 +274,7 @@ void test_vluxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2(
@@ -289,7 +289,7 @@ void test_vluxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1(
@@ -304,7 +304,7 @@ void test_vluxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2(
@@ -319,7 +319,7 @@ void test_vluxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1(
@@ -334,7 +334,7 @@ void test_vluxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2(
@@ -349,7 +349,7 @@ void test_vluxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8(
@@ -364,7 +364,7 @@ void test_vluxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4(
@@ -379,7 +379,7 @@ void test_vluxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2(
@@ -394,7 +394,7 @@ void test_vluxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1(
@@ -409,7 +409,7 @@ void test_vluxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2(
@@ -424,7 +424,7 @@ void test_vluxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4(
@@ -439,7 +439,7 @@ void test_vluxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2(
@@ -454,7 +454,7 @@ void test_vluxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1(
@@ -469,7 +469,7 @@ void test_vluxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2(
@@ -484,7 +484,7 @@ void test_vluxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2(
@@ -499,7 +499,7 @@ void test_vluxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1(
@@ -514,7 +514,7 @@ void test_vluxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2(
@@ -529,7 +529,7 @@ void test_vluxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1(
@@ -544,7 +544,7 @@ void test_vluxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2(
@@ -559,7 +559,7 @@ void test_vluxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_m(
@@ -574,7 +574,7 @@ void test_vluxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_m(
@@ -589,7 +589,7 @@ void test_vluxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m(
@@ -604,7 +604,7 @@ void test_vluxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m(
@@ -619,7 +619,7 @@ void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_m(
@@ -649,7 +649,7 @@ void test_vluxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_m(
@@ -664,7 +664,7 @@ void test_vluxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_m(
@@ -679,7 +679,7 @@ void test_vluxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_m(
@@ -694,7 +694,7 @@ void test_vluxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m(
@@ -709,7 +709,7 @@ void test_vluxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m(
@@ -724,7 +724,7 @@ void test_vluxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vluxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_m(
@@ -754,7 +754,7 @@ void test_vluxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_m(
@@ -769,7 +769,7 @@ void test_vluxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m(
@@ -784,7 +784,7 @@ void test_vluxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m(
@@ -799,7 +799,7 @@ void test_vluxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_m(
@@ -814,7 +814,7 @@ void test_vluxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_m(
@@ -829,7 +829,7 @@ void test_vluxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vluxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_m(
@@ -859,7 +859,7 @@ void test_vluxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_m(
@@ -874,7 +874,7 @@ void test_vluxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_m(
@@ -889,7 +889,7 @@ void test_vluxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_m(
@@ -904,7 +904,7 @@ void test_vluxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m(
@@ -919,7 +919,7 @@ void test_vluxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m(
@@ -934,7 +934,7 @@ void test_vluxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vluxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_m(
@@ -964,7 +964,7 @@ void test_vluxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_m(
@@ -979,7 +979,7 @@ void test_vluxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m(
@@ -994,7 +994,7 @@ void test_vluxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m(
@@ -1009,7 +1009,7 @@ void test_vluxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_m(
@@ -1024,7 +1024,7 @@ void test_vluxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_m(
@@ -1039,7 +1039,7 @@ void test_vluxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_m(
@@ -1069,7 +1069,7 @@ void test_vluxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_m(
@@ -1084,7 +1084,7 @@ void test_vluxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_m(
@@ -1099,7 +1099,7 @@ void test_vluxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_m(
@@ -1114,6 +1114,6 @@ void test_vluxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl);
}
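// ---------------------------------------------------------------------------
// A minimal sketch (not part of the patch) of the masked, non-policy overload
// renamed above. The helper `gather_xyz_masked`, the 0xFFFF sentinel, and all
// parameter names are illustrative assumptions; the intrinsic calls follow
// the signatures in these tests (f32/m1 data with an ei16 index pairs
// vfloat32m1_t, vuint16mf2_t, and vbool32_t, as in
// test_vluxseg3ei16_v_f32m1_m above).
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Load {x, y, z} float triples only for lanes whose 16-bit byte offset is
// not the 0xFFFF sentinel. Inactive lanes of v0..v2 are unspecified in this
// non-policy masked form, so the stores are masked as well.
static void gather_xyz_masked(float *x, float *y, float *z, const float *base,
                              const uint16_t *offsets, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vuint16mf2_t bindex = __riscv_vle16_v_u16mf2(offsets + i, vl);
    vbool32_t mask = __riscv_vmsne(bindex, 0xFFFF, vl);  // active lanes
    vfloat32m1_t v0, v1, v2;
    __riscv_vluxseg3ei16(&v0, &v1, &v2, mask, base, bindex, vl);
    __riscv_vse32(mask, x + i, v0, vl);  // masked overloaded stores
    __riscv_vse32(mask, y + i, v1, vl);
    __riscv_vse32(mask, z + i, v2, vl);
    i += vl;
  }
}
// ---------------------------------------------------------------------------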
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c
index 37d9fae5a409..f167e5944a1d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2(
@@ -34,7 +34,7 @@ void test_vluxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1(
@@ -49,7 +49,7 @@ void test_vluxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2(
@@ -64,7 +64,7 @@ void test_vluxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2(
@@ -79,7 +79,7 @@ void test_vluxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1(
@@ -94,7 +94,7 @@ void test_vluxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2(
@@ -109,7 +109,7 @@ void test_vluxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1(
@@ -124,7 +124,7 @@ void test_vluxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2(
@@ -139,7 +139,7 @@ void test_vluxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8(
@@ -154,7 +154,7 @@ void test_vluxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4(
@@ -169,7 +169,7 @@ void test_vluxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2(
@@ -184,7 +184,7 @@ void test_vluxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1(
@@ -199,7 +199,7 @@ void test_vluxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2(
@@ -214,7 +214,7 @@ void test_vluxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4(
@@ -229,7 +229,7 @@ void test_vluxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2(
@@ -244,7 +244,7 @@ void test_vluxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1(
@@ -259,7 +259,7 @@ void test_vluxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2(
@@ -274,7 +274,7 @@ void test_vluxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2(
@@ -289,7 +289,7 @@ void test_vluxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1(
@@ -304,7 +304,7 @@ void test_vluxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2(
@@ -319,7 +319,7 @@ void test_vluxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1(
@@ -334,7 +334,7 @@ void test_vluxseg3ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2(
@@ -349,7 +349,7 @@ void test_vluxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8(
@@ -364,7 +364,7 @@ void test_vluxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4(
@@ -379,7 +379,7 @@ void test_vluxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2(
@@ -394,7 +394,7 @@ void test_vluxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1(
@@ -409,7 +409,7 @@ void test_vluxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2(
@@ -424,7 +424,7 @@ void test_vluxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4(
@@ -439,7 +439,7 @@ void test_vluxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2(
@@ -454,7 +454,7 @@ void test_vluxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1(
@@ -469,7 +469,7 @@ void test_vluxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2(
@@ -484,7 +484,7 @@ void test_vluxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2(
@@ -499,7 +499,7 @@ void test_vluxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1(
@@ -514,7 +514,7 @@ void test_vluxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2(
@@ -529,7 +529,7 @@ void test_vluxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1(
@@ -544,7 +544,7 @@ void test_vluxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2(
@@ -559,7 +559,7 @@ void test_vluxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_m(
@@ -574,7 +574,7 @@ void test_vluxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_m(
@@ -589,7 +589,7 @@ void test_vluxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_m(
@@ -604,7 +604,7 @@ void test_vluxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m(
@@ -619,7 +619,7 @@ void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_m(
@@ -649,7 +649,7 @@ void test_vluxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_m(
@@ -664,7 +664,7 @@ void test_vluxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_m(
@@ -679,7 +679,7 @@ void test_vluxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_m(
@@ -694,7 +694,7 @@ void test_vluxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m(
@@ -709,7 +709,7 @@ void test_vluxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m(
@@ -724,7 +724,7 @@ void test_vluxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vluxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m(
@@ -754,7 +754,7 @@ void test_vluxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_m(
@@ -769,7 +769,7 @@ void test_vluxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m(
@@ -784,7 +784,7 @@ void test_vluxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m(
@@ -799,7 +799,7 @@ void test_vluxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_m(
@@ -814,7 +814,7 @@ void test_vluxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_m(
@@ -829,7 +829,7 @@ void test_vluxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vluxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_m(
@@ -859,7 +859,7 @@ void test_vluxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_m(
@@ -874,7 +874,7 @@ void test_vluxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_m(
@@ -889,7 +889,7 @@ void test_vluxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_m(
@@ -904,7 +904,7 @@ void test_vluxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m(
@@ -919,7 +919,7 @@ void test_vluxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m(
@@ -934,7 +934,7 @@ void test_vluxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vluxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m(
@@ -964,7 +964,7 @@ void test_vluxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m(
@@ -979,7 +979,7 @@ void test_vluxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m(
@@ -994,7 +994,7 @@ void test_vluxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m(
@@ -1009,7 +1009,7 @@ void test_vluxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_m(
@@ -1024,7 +1024,7 @@ void test_vluxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_m(
@@ -1039,7 +1039,7 @@ void test_vluxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_m(
@@ -1069,7 +1069,7 @@ void test_vluxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_m(
@@ -1084,7 +1084,7 @@ void test_vluxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_m(
@@ -1099,7 +1099,7 @@ void test_vluxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_m(
@@ -1114,6 +1114,6 @@ void test_vluxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c
index 04cdd0323aa6..170c1462b866 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2(
@@ -34,7 +34,7 @@ void test_vluxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1(
@@ -49,7 +49,7 @@ void test_vluxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2(
@@ -64,7 +64,7 @@ void test_vluxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2(
@@ -79,7 +79,7 @@ void test_vluxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1(
@@ -94,7 +94,7 @@ void test_vluxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2(
@@ -109,7 +109,7 @@ void test_vluxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1(
@@ -124,7 +124,7 @@ void test_vluxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2(
@@ -139,7 +139,7 @@ void test_vluxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8(
@@ -154,7 +154,7 @@ void test_vluxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4(
@@ -169,7 +169,7 @@ void test_vluxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2(
@@ -184,7 +184,7 @@ void test_vluxseg3ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1(
@@ -199,7 +199,7 @@ void test_vluxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4(
@@ -214,7 +214,7 @@ void test_vluxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2(
@@ -229,7 +229,7 @@ void test_vluxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1(
@@ -244,7 +244,7 @@ void test_vluxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2(
@@ -259,7 +259,7 @@ void test_vluxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2(
@@ -274,7 +274,7 @@ void test_vluxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1(
@@ -289,7 +289,7 @@ void test_vluxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2(
@@ -304,7 +304,7 @@ void test_vluxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1(
@@ -319,7 +319,7 @@ void test_vluxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2(
@@ -334,7 +334,7 @@ void test_vluxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8(
@@ -349,7 +349,7 @@ void test_vluxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4(
@@ -364,7 +364,7 @@ void test_vluxseg3ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2(
@@ -379,7 +379,7 @@ void test_vluxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1(
@@ -394,7 +394,7 @@ void test_vluxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4(
@@ -409,7 +409,7 @@ void test_vluxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2(
@@ -424,7 +424,7 @@ void test_vluxseg3ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1(
@@ -439,7 +439,7 @@ void test_vluxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2(
@@ -454,7 +454,7 @@ void test_vluxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2(
@@ -469,7 +469,7 @@ void test_vluxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1(
@@ -484,7 +484,7 @@ void test_vluxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2(
@@ -499,7 +499,7 @@ void test_vluxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1(
@@ -514,7 +514,7 @@ void test_vluxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2(
@@ -529,7 +529,7 @@ void test_vluxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_m(
@@ -544,7 +544,7 @@ void test_vluxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_m(
@@ -559,7 +559,7 @@ void test_vluxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_m(
@@ -574,7 +574,7 @@ void test_vluxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m(
@@ -589,7 +589,7 @@ void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m(
@@ -604,7 +604,7 @@ void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_m(
@@ -619,7 +619,7 @@ void test_vluxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_m(
@@ -634,7 +634,7 @@ void test_vluxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_m(
@@ -649,7 +649,7 @@ void test_vluxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_m(
@@ -664,7 +664,7 @@ void test_vluxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m(
@@ -679,7 +679,7 @@ void test_vluxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m(
@@ -694,7 +694,7 @@ void test_vluxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m(
@@ -709,7 +709,7 @@ void test_vluxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_m(
@@ -724,7 +724,7 @@ void test_vluxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m(
@@ -739,7 +739,7 @@ void test_vluxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m(
@@ -754,7 +754,7 @@ void test_vluxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m(
@@ -769,7 +769,7 @@ void test_vluxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_m(
@@ -784,7 +784,7 @@ void test_vluxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m(
@@ -799,7 +799,7 @@ void test_vluxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_m(
@@ -814,7 +814,7 @@ void test_vluxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_m(
@@ -829,7 +829,7 @@ void test_vluxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_m(
@@ -844,7 +844,7 @@ void test_vluxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_m(
@@ -859,7 +859,7 @@ void test_vluxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m(
@@ -874,7 +874,7 @@ void test_vluxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m(
@@ -889,7 +889,7 @@ void test_vluxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m(
@@ -904,7 +904,7 @@ void test_vluxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_m(
@@ -919,7 +919,7 @@ void test_vluxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m(
@@ -934,7 +934,7 @@ void test_vluxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m(
@@ -949,7 +949,7 @@ void test_vluxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_m(
@@ -964,7 +964,7 @@ void test_vluxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_m(
@@ -979,7 +979,7 @@ void test_vluxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m(
@@ -994,7 +994,7 @@ void test_vluxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_m(
@@ -1009,7 +1009,7 @@ void test_vluxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_m(
@@ -1024,7 +1024,7 @@ void test_vluxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_m(
@@ -1039,7 +1039,7 @@ void test_vluxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_m(
@@ -1054,6 +1054,6 @@ void test_vluxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c
index 9149354325b5..a296f1727132 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2(
@@ -34,7 +34,7 @@ void test_vluxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1(
@@ -49,7 +49,7 @@ void test_vluxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2(
@@ -64,7 +64,7 @@ void test_vluxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2(
@@ -79,7 +79,7 @@ void test_vluxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1(
@@ -94,7 +94,7 @@ void test_vluxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2(
@@ -109,7 +109,7 @@ void test_vluxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1(
@@ -124,7 +124,7 @@ void test_vluxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2(
@@ -139,7 +139,7 @@ void test_vluxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8(
@@ -154,7 +154,7 @@ void test_vluxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4(
@@ -169,7 +169,7 @@ void test_vluxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2(
@@ -184,7 +184,7 @@ void test_vluxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1(
@@ -199,7 +199,7 @@ void test_vluxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2(
@@ -214,7 +214,7 @@ void test_vluxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4(
@@ -229,7 +229,7 @@ void test_vluxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2(
@@ -244,7 +244,7 @@ void test_vluxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1(
@@ -259,7 +259,7 @@ void test_vluxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2(
@@ -274,7 +274,7 @@ void test_vluxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2(
@@ -289,7 +289,7 @@ void test_vluxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1(
@@ -304,7 +304,7 @@ void test_vluxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2(
@@ -319,7 +319,7 @@ void test_vluxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1(
@@ -334,7 +334,7 @@ void test_vluxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2(
@@ -349,7 +349,7 @@ void test_vluxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8(
@@ -364,7 +364,7 @@ void test_vluxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, co
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4(
@@ -379,7 +379,7 @@ void test_vluxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2(
@@ -394,7 +394,7 @@ void test_vluxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1(
@@ -409,7 +409,7 @@ void test_vluxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2(
@@ -424,7 +424,7 @@ void test_vluxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, con
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4(
@@ -439,7 +439,7 @@ void test_vluxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, con
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2(
@@ -454,7 +454,7 @@ void test_vluxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1(
@@ -469,7 +469,7 @@ void test_vluxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2(
@@ -484,7 +484,7 @@ void test_vluxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2(
@@ -499,7 +499,7 @@ void test_vluxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1(
@@ -514,7 +514,7 @@ void test_vluxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2(
@@ -529,7 +529,7 @@ void test_vluxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1(
@@ -544,7 +544,7 @@ void test_vluxseg3ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2(
@@ -559,7 +559,7 @@ void test_vluxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_m(
@@ -574,7 +574,7 @@ void test_vluxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_m(
@@ -589,7 +589,7 @@ void test_vluxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m(
@@ -604,7 +604,7 @@ void test_vluxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m(
@@ -619,7 +619,7 @@ void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_m(
@@ -649,7 +649,7 @@ void test_vluxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_m(
@@ -664,7 +664,7 @@ void test_vluxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_m(
@@ -679,7 +679,7 @@ void test_vluxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_m(
@@ -694,7 +694,7 @@ void test_vluxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m(
@@ -709,7 +709,7 @@ void test_vluxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m(
@@ -724,7 +724,7 @@ void test_vluxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vluxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_m(
@@ -754,7 +754,7 @@ void test_vluxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_m(
@@ -769,7 +769,7 @@ void test_vluxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m(
@@ -784,7 +784,7 @@ void test_vluxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m(
@@ -799,7 +799,7 @@ void test_vluxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m(
@@ -814,7 +814,7 @@ void test_vluxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_m(
@@ -829,7 +829,7 @@ void test_vluxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vluxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_m(
@@ -859,7 +859,7 @@ void test_vluxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_m(
@@ -874,7 +874,7 @@ void test_vluxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_m(
@@ -889,7 +889,7 @@ void test_vluxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_m(
@@ -904,7 +904,7 @@ void test_vluxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m(
@@ -919,7 +919,7 @@ void test_vluxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m(
@@ -934,7 +934,7 @@ void test_vluxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vluxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_m(
@@ -964,7 +964,7 @@ void test_vluxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_m(
@@ -979,7 +979,7 @@ void test_vluxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m(
@@ -994,7 +994,7 @@ void test_vluxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m(
@@ -1009,7 +1009,7 @@ void test_vluxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_m(
@@ -1024,7 +1024,7 @@ void test_vluxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_m(
@@ -1039,7 +1039,7 @@ void test_vluxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_m(
@@ -1069,7 +1069,7 @@ void test_vluxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_m(
@@ -1084,7 +1084,7 @@ void test_vluxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_m(
@@ -1099,7 +1099,7 @@ void test_vluxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_m(
@@ -1114,6 +1114,6 @@ void test_vluxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
+ return __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
}
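
A minimal caller-side sketch of what this rename means for user code (illustrative only, not part of the patch; both signatures are copied from the tests above). Only the spelling of the overloaded intrinsic changes; the argument lists, including mask placement in the `_m` variants, are untouched:

    #include <riscv_vector.h>

    // Unmasked 3-field indexed segment load: the call site changes
    // from vluxseg3ei8(...) to __riscv_vluxseg3ei8(...).
    void load3(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2,
               const double *base, vuint8mf8_t bindex, size_t vl) {
      __riscv_vluxseg3ei8(v0, v1, v2, base, bindex, vl);
    }

    // Masked variant: the mask still follows the destination pointers,
    // exactly as in the pre-rename overload.
    void load3_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2,
                 vbool64_t mask, const double *base, vuint8mf8_t bindex,
                 size_t vl) {
      __riscv_vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl);
    }
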
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c
index 4fae94812712..105f2c6fd46d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2(
@@ -38,7 +38,7 @@ void test_vluxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1(
@@ -55,7 +55,7 @@ void test_vluxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2(
@@ -72,7 +72,7 @@ void test_vluxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2(
@@ -89,7 +89,7 @@ void test_vluxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1(
@@ -106,7 +106,7 @@ void test_vluxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2(
@@ -123,7 +123,7 @@ void test_vluxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1(
@@ -140,7 +140,7 @@ void test_vluxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2(
@@ -157,7 +157,7 @@ void test_vluxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8(
@@ -174,7 +174,7 @@ void test_vluxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4(
@@ -191,7 +191,7 @@ void test_vluxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2(
@@ -208,7 +208,7 @@ void test_vluxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1(
@@ -225,7 +225,7 @@ void test_vluxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2(
@@ -242,7 +242,7 @@ void test_vluxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4(
@@ -259,7 +259,7 @@ void test_vluxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2(
@@ -276,7 +276,7 @@ void test_vluxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1(
@@ -293,7 +293,7 @@ void test_vluxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2(
@@ -310,7 +310,7 @@ void test_vluxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2(
@@ -327,7 +327,7 @@ void test_vluxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1(
@@ -344,7 +344,7 @@ void test_vluxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2(
@@ -361,7 +361,7 @@ void test_vluxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1(
@@ -378,7 +378,7 @@ void test_vluxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2(
@@ -395,7 +395,7 @@ void test_vluxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8(
@@ -412,7 +412,7 @@ void test_vluxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4(
@@ -429,7 +429,7 @@ void test_vluxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2(
@@ -446,7 +446,7 @@ void test_vluxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1(
@@ -463,7 +463,7 @@ void test_vluxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2(
@@ -480,7 +480,7 @@ void test_vluxseg4ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4(
@@ -497,7 +497,7 @@ void test_vluxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2(
@@ -514,7 +514,7 @@ void test_vluxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1(
@@ -531,7 +531,7 @@ void test_vluxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2(
@@ -548,7 +548,7 @@ void test_vluxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2(
@@ -565,7 +565,7 @@ void test_vluxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1(
@@ -582,7 +582,7 @@ void test_vluxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2(
@@ -599,7 +599,7 @@ void test_vluxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1(
@@ -616,7 +616,7 @@ void test_vluxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2(
@@ -633,7 +633,7 @@ void test_vluxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_m(
@@ -650,7 +650,7 @@ void test_vluxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_m(
@@ -667,7 +667,7 @@ void test_vluxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m(
@@ -684,7 +684,7 @@ void test_vluxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m(
@@ -701,7 +701,7 @@ void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m(
@@ -718,7 +718,7 @@ void test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_m(
@@ -735,7 +735,7 @@ void test_vluxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_m(
@@ -752,7 +752,7 @@ void test_vluxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_m(
@@ -769,7 +769,7 @@ void test_vluxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_m(
@@ -786,7 +786,7 @@ void test_vluxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m(
@@ -803,7 +803,7 @@ void test_vluxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m(
@@ -820,7 +820,7 @@ void test_vluxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m(
@@ -837,7 +837,7 @@ void test_vluxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_m(
@@ -854,7 +854,7 @@ void test_vluxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_m(
@@ -871,7 +871,7 @@ void test_vluxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m(
@@ -888,7 +888,7 @@ void test_vluxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m(
@@ -905,7 +905,7 @@ void test_vluxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_m(
@@ -922,7 +922,7 @@ void test_vluxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_m(
@@ -939,7 +939,7 @@ void test_vluxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m(
@@ -956,7 +956,7 @@ void test_vluxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_m(
@@ -973,7 +973,7 @@ void test_vluxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_m(
@@ -990,7 +990,7 @@ void test_vluxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_m(
@@ -1007,7 +1007,7 @@ void test_vluxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_m(
@@ -1024,7 +1024,7 @@ void test_vluxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m(
@@ -1041,7 +1041,7 @@ void test_vluxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m(
@@ -1058,7 +1058,7 @@ void test_vluxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m(
@@ -1075,7 +1075,7 @@ void test_vluxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_m(
@@ -1092,7 +1092,7 @@ void test_vluxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_m(
@@ -1109,7 +1109,7 @@ void test_vluxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m(
@@ -1126,7 +1126,7 @@ void test_vluxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m(
@@ -1143,7 +1143,7 @@ void test_vluxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_m(
@@ -1160,7 +1160,7 @@ void test_vluxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_m(
@@ -1177,7 +1177,7 @@ void test_vluxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m(
@@ -1194,7 +1194,7 @@ void test_vluxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_m(
@@ -1211,7 +1211,7 @@ void test_vluxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_m(
@@ -1228,7 +1228,7 @@ void test_vluxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_m(
@@ -1245,7 +1245,7 @@ void test_vluxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_m(
@@ -1262,6 +1262,6 @@ void test_vluxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c
index 5e71702489c7..35d492206340 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2(
@@ -38,7 +38,7 @@ void test_vluxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1(
@@ -55,7 +55,7 @@ void test_vluxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2(
@@ -72,7 +72,7 @@ void test_vluxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2(
@@ -89,7 +89,7 @@ void test_vluxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1(
@@ -106,7 +106,7 @@ void test_vluxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2(
@@ -123,7 +123,7 @@ void test_vluxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1(
@@ -140,7 +140,7 @@ void test_vluxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2(
@@ -157,7 +157,7 @@ void test_vluxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8(
@@ -174,7 +174,7 @@ void test_vluxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4(
@@ -191,7 +191,7 @@ void test_vluxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2(
@@ -208,7 +208,7 @@ void test_vluxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1(
@@ -225,7 +225,7 @@ void test_vluxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2(
@@ -242,7 +242,7 @@ void test_vluxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4(
@@ -259,7 +259,7 @@ void test_vluxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2(
@@ -276,7 +276,7 @@ void test_vluxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1(
@@ -293,7 +293,7 @@ void test_vluxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2(
@@ -310,7 +310,7 @@ void test_vluxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2(
@@ -327,7 +327,7 @@ void test_vluxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1(
@@ -344,7 +344,7 @@ void test_vluxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2(
@@ -361,7 +361,7 @@ void test_vluxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1(
@@ -378,7 +378,7 @@ void test_vluxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2(
@@ -395,7 +395,7 @@ void test_vluxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8(
@@ -412,7 +412,7 @@ void test_vluxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4(
@@ -429,7 +429,7 @@ void test_vluxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2(
@@ -446,7 +446,7 @@ void test_vluxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1(
@@ -463,7 +463,7 @@ void test_vluxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2(
@@ -480,7 +480,7 @@ void test_vluxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4(
@@ -497,7 +497,7 @@ void test_vluxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2(
@@ -514,7 +514,7 @@ void test_vluxseg4ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1(
@@ -531,7 +531,7 @@ void test_vluxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2(
@@ -548,7 +548,7 @@ void test_vluxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2(
@@ -565,7 +565,7 @@ void test_vluxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1(
@@ -582,7 +582,7 @@ void test_vluxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2(
@@ -599,7 +599,7 @@ void test_vluxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1(
@@ -616,7 +616,7 @@ void test_vluxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2(
@@ -633,7 +633,7 @@ void test_vluxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_m(
@@ -650,7 +650,7 @@ void test_vluxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_m(
@@ -667,7 +667,7 @@ void test_vluxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m(
@@ -684,7 +684,7 @@ void test_vluxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_m(
@@ -701,7 +701,7 @@ void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m(
@@ -718,7 +718,7 @@ void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_m(
@@ -735,7 +735,7 @@ void test_vluxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_m(
@@ -752,7 +752,7 @@ void test_vluxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_m(
@@ -769,7 +769,7 @@ void test_vluxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_m(
@@ -786,7 +786,7 @@ void test_vluxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m(
@@ -803,7 +803,7 @@ void test_vluxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m(
@@ -820,7 +820,7 @@ void test_vluxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m(
@@ -837,7 +837,7 @@ void test_vluxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m(
@@ -854,7 +854,7 @@ void test_vluxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_m(
@@ -871,7 +871,7 @@ void test_vluxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m(
@@ -888,7 +888,7 @@ void test_vluxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m(
@@ -905,7 +905,7 @@ void test_vluxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_m(
@@ -922,7 +922,7 @@ void test_vluxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_m(
@@ -939,7 +939,7 @@ void test_vluxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m(
@@ -956,7 +956,7 @@ void test_vluxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_m(
@@ -973,7 +973,7 @@ void test_vluxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_m(
@@ -990,7 +990,7 @@ void test_vluxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_m(
@@ -1007,7 +1007,7 @@ void test_vluxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_m(
@@ -1024,7 +1024,7 @@ void test_vluxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m(
@@ -1041,7 +1041,7 @@ void test_vluxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m(
@@ -1058,7 +1058,7 @@ void test_vluxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m(
@@ -1075,7 +1075,7 @@ void test_vluxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_m(
@@ -1092,7 +1092,7 @@ void test_vluxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m(
@@ -1109,7 +1109,7 @@ void test_vluxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m(
@@ -1126,7 +1126,7 @@ void test_vluxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m(
@@ -1143,7 +1143,7 @@ void test_vluxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_m(
@@ -1160,7 +1160,7 @@ void test_vluxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_m(
@@ -1177,7 +1177,7 @@ void test_vluxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m(
@@ -1194,7 +1194,7 @@ void test_vluxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_m(
@@ -1211,7 +1211,7 @@ void test_vluxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_m(
@@ -1228,7 +1228,7 @@ void test_vluxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_m(
@@ -1245,7 +1245,7 @@ void test_vluxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_m(
@@ -1262,6 +1262,6 @@ void test_vluxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c
index 1d73007468e9..d17d1df0f4ce 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2(
@@ -38,7 +38,7 @@ void test_vluxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1(
@@ -55,7 +55,7 @@ void test_vluxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2(
@@ -72,7 +72,7 @@ void test_vluxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2(
@@ -89,7 +89,7 @@ void test_vluxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1(
@@ -106,7 +106,7 @@ void test_vluxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2(
@@ -123,7 +123,7 @@ void test_vluxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1(
@@ -140,7 +140,7 @@ void test_vluxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2(
@@ -157,7 +157,7 @@ void test_vluxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8(
@@ -174,7 +174,7 @@ void test_vluxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4(
@@ -191,7 +191,7 @@ void test_vluxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2(
@@ -208,7 +208,7 @@ void test_vluxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1(
@@ -225,7 +225,7 @@ void test_vluxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4(
@@ -242,7 +242,7 @@ void test_vluxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2(
@@ -259,7 +259,7 @@ void test_vluxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1(
@@ -276,7 +276,7 @@ void test_vluxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2(
@@ -293,7 +293,7 @@ void test_vluxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2(
@@ -310,7 +310,7 @@ void test_vluxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1(
@@ -327,7 +327,7 @@ void test_vluxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2(
@@ -344,7 +344,7 @@ void test_vluxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1(
@@ -361,7 +361,7 @@ void test_vluxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2(
@@ -378,7 +378,7 @@ void test_vluxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vluxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4(
@@ -412,7 +412,7 @@ void test_vluxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2(
@@ -429,7 +429,7 @@ void test_vluxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1(
@@ -446,7 +446,7 @@ void test_vluxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4(
@@ -463,7 +463,7 @@ void test_vluxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2(
@@ -480,7 +480,7 @@ void test_vluxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1(
@@ -497,7 +497,7 @@ void test_vluxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2(
@@ -514,7 +514,7 @@ void test_vluxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2(
@@ -531,7 +531,7 @@ void test_vluxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1(
@@ -548,7 +548,7 @@ void test_vluxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2(
@@ -565,7 +565,7 @@ void test_vluxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1(
@@ -582,7 +582,7 @@ void test_vluxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2(
@@ -599,7 +599,7 @@ void test_vluxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_m(
@@ -616,7 +616,7 @@ void test_vluxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_m(
@@ -633,7 +633,7 @@ void test_vluxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m(
@@ -650,7 +650,7 @@ void test_vluxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m(
@@ -667,7 +667,7 @@ void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m(
@@ -684,7 +684,7 @@ void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_m(
@@ -701,7 +701,7 @@ void test_vluxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_m(
@@ -718,7 +718,7 @@ void test_vluxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_m(
@@ -735,7 +735,7 @@ void test_vluxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_m(
@@ -752,7 +752,7 @@ void test_vluxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m(
@@ -769,7 +769,7 @@ void test_vluxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vluxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m(
@@ -803,7 +803,7 @@ void test_vluxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_m(
@@ -820,7 +820,7 @@ void test_vluxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m(
@@ -837,7 +837,7 @@ void test_vluxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m(
@@ -854,7 +854,7 @@ void test_vluxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_m(
@@ -871,7 +871,7 @@ void test_vluxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_m(
@@ -888,7 +888,7 @@ void test_vluxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m(
@@ -905,7 +905,7 @@ void test_vluxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_m(
@@ -922,7 +922,7 @@ void test_vluxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_m(
@@ -939,7 +939,7 @@ void test_vluxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_m(
@@ -956,7 +956,7 @@ void test_vluxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_m(
@@ -973,7 +973,7 @@ void test_vluxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m(
@@ -990,7 +990,7 @@ void test_vluxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m(
@@ -1007,7 +1007,7 @@ void test_vluxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m(
@@ -1024,7 +1024,7 @@ void test_vluxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_m(
@@ -1041,7 +1041,7 @@ void test_vluxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m(
@@ -1058,7 +1058,7 @@ void test_vluxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m(
@@ -1075,7 +1075,7 @@ void test_vluxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_m(
@@ -1092,7 +1092,7 @@ void test_vluxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_m(
@@ -1109,7 +1109,7 @@ void test_vluxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m(
@@ -1126,7 +1126,7 @@ void test_vluxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_m(
@@ -1143,7 +1143,7 @@ void test_vluxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_m(
@@ -1160,7 +1160,7 @@ void test_vluxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_m(
@@ -1177,7 +1177,7 @@ void test_vluxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_m(
@@ -1194,6 +1194,6 @@ void test_vluxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl);
}
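The hunks above apply one mechanical rename throughout: every overloaded call gains the `__riscv_` prefix while its argument list is untouched. A minimal sketch of what this means at a user call site, using the same indexed segment-load intrinsic and LMUL-1 types exercised by the tests above (the helper name `load4_u16m1` is illustrative, not part of the patch):

#include <riscv_vector.h>

// Loads four fields of a 4-member struct-of-arrays via a 64-bit
// unordered-indexed segment load. Under this patch only the
// overloaded intrinsic's name changes: vluxseg4ei64 becomes
// __riscv_vluxseg4ei64; arguments and overload resolution are
// identical to the tests above.
void load4_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
                 vuint16m1_t *v3, const uint16_t *base,
                 vuint64m4_t bindex, size_t vl) {
  __riscv_vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);  // was: vluxseg4ei64(...)
}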
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c
index 2221fe28e388..09f77c6193ae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2(
@@ -38,7 +38,7 @@ void test_vluxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1(
@@ -55,7 +55,7 @@ void test_vluxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2(
@@ -72,7 +72,7 @@ void test_vluxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2(
@@ -89,7 +89,7 @@ void test_vluxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1(
@@ -106,7 +106,7 @@ void test_vluxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2(
@@ -123,7 +123,7 @@ void test_vluxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1(
@@ -140,7 +140,7 @@ void test_vluxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2(
@@ -157,7 +157,7 @@ void test_vluxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8(
@@ -174,7 +174,7 @@ void test_vluxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4(
@@ -191,7 +191,7 @@ void test_vluxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2(
@@ -208,7 +208,7 @@ void test_vluxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1(
@@ -225,7 +225,7 @@ void test_vluxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2(
@@ -242,7 +242,7 @@ void test_vluxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4(
@@ -259,7 +259,7 @@ void test_vluxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2(
@@ -276,7 +276,7 @@ void test_vluxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1(
@@ -293,7 +293,7 @@ void test_vluxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2(
@@ -310,7 +310,7 @@ void test_vluxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2(
@@ -327,7 +327,7 @@ void test_vluxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1(
@@ -344,7 +344,7 @@ void test_vluxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2(
@@ -361,7 +361,7 @@ void test_vluxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1(
@@ -378,7 +378,7 @@ void test_vluxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2(
@@ -395,7 +395,7 @@ void test_vluxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8(
@@ -412,7 +412,7 @@ void test_vluxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4(
@@ -429,7 +429,7 @@ void test_vluxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2(
@@ -446,7 +446,7 @@ void test_vluxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1(
@@ -463,7 +463,7 @@ void test_vluxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2(
@@ -480,7 +480,7 @@ void test_vluxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4(
@@ -497,7 +497,7 @@ void test_vluxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2(
@@ -514,7 +514,7 @@ void test_vluxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1(
@@ -531,7 +531,7 @@ void test_vluxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2(
@@ -548,7 +548,7 @@ void test_vluxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2(
@@ -565,7 +565,7 @@ void test_vluxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1(
@@ -582,7 +582,7 @@ void test_vluxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2(
@@ -599,7 +599,7 @@ void test_vluxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1(
@@ -616,7 +616,7 @@ void test_vluxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2(
@@ -633,7 +633,7 @@ void test_vluxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_m(
@@ -650,7 +650,7 @@ void test_vluxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_m(
@@ -667,7 +667,7 @@ void test_vluxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m(
@@ -684,7 +684,7 @@ void test_vluxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m(
@@ -701,7 +701,7 @@ void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m(
@@ -718,7 +718,7 @@ void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_m(
@@ -735,7 +735,7 @@ void test_vluxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_m(
@@ -752,7 +752,7 @@ void test_vluxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_m(
@@ -769,7 +769,7 @@ void test_vluxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_m(
@@ -786,7 +786,7 @@ void test_vluxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m(
@@ -803,7 +803,7 @@ void test_vluxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m(
@@ -820,7 +820,7 @@ void test_vluxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m(
@@ -837,7 +837,7 @@ void test_vluxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_m(
@@ -854,7 +854,7 @@ void test_vluxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_m(
@@ -871,7 +871,7 @@ void test_vluxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m(
@@ -888,7 +888,7 @@ void test_vluxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m(
@@ -905,7 +905,7 @@ void test_vluxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_m(
@@ -922,7 +922,7 @@ void test_vluxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_m(
@@ -939,7 +939,7 @@ void test_vluxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m(
@@ -956,7 +956,7 @@ void test_vluxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_m(
@@ -973,7 +973,7 @@ void test_vluxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_m(
@@ -990,7 +990,7 @@ void test_vluxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_m(
@@ -1007,7 +1007,7 @@ void test_vluxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_m(
@@ -1024,7 +1024,7 @@ void test_vluxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m(
@@ -1041,7 +1041,7 @@ void test_vluxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m(
@@ -1058,7 +1058,7 @@ void test_vluxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m(
@@ -1075,7 +1075,7 @@ void test_vluxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_m(
@@ -1092,7 +1092,7 @@ void test_vluxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_m(
@@ -1109,7 +1109,7 @@ void test_vluxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m(
@@ -1126,7 +1126,7 @@ void test_vluxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m(
@@ -1143,7 +1143,7 @@ void test_vluxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_m(
@@ -1160,7 +1160,7 @@ void test_vluxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_m(
@@ -1177,7 +1177,7 @@ void test_vluxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m(
@@ -1194,7 +1194,7 @@ void test_vluxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_m(
@@ -1211,7 +1211,7 @@ void test_vluxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_m(
@@ -1228,7 +1228,7 @@ void test_vluxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_m(
@@ -1245,7 +1245,7 @@ void test_vluxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_m(
@@ -1262,6 +1262,6 @@ void test_vluxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
+ return __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}
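As in the previous file, the masked overloads resolve on the extra vbool argument placed before the base pointer; the rename does not touch overload resolution. A hedged sketch mirroring test_vluxseg4ei8_v_u64m2_m above (the helper name `load4_u64m2_masked` is illustrative):

#include <riscv_vector.h>

// Masked overload: same call shape as the unmasked form, with a
// vbool32_t mask inserted before the base pointer. Only the
// overloaded name gains the __riscv_ prefix.
void load4_u64m2_masked(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2,
                        vuint64m2_t *v3, vbool32_t mask,
                        const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
  __riscv_vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl);
}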
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c
index c6436510937c..dc20a3588fd9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2(
@@ -42,7 +42,7 @@ void test_vluxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1(
@@ -61,7 +61,7 @@ void test_vluxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2(
@@ -80,7 +80,7 @@ void test_vluxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1(
@@ -99,7 +99,7 @@ void test_vluxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1(
@@ -118,7 +118,7 @@ void test_vluxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8(
@@ -137,7 +137,7 @@ void test_vluxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4(
@@ -156,7 +156,7 @@ void test_vluxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2(
@@ -175,7 +175,7 @@ void test_vluxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1(
@@ -194,7 +194,7 @@ void test_vluxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4(
@@ -213,7 +213,7 @@ void test_vluxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2(
@@ -232,7 +232,7 @@ void test_vluxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1(
@@ -251,7 +251,7 @@ void test_vluxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2(
@@ -270,7 +270,7 @@ void test_vluxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1(
@@ -289,7 +289,7 @@ void test_vluxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1(
@@ -308,7 +308,7 @@ void test_vluxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8(
@@ -327,7 +327,7 @@ void test_vluxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vluxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2(
@@ -365,7 +365,7 @@ void test_vluxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1(
@@ -384,7 +384,7 @@ void test_vluxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4(
@@ -403,7 +403,7 @@ void test_vluxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2(
@@ -422,7 +422,7 @@ void test_vluxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1(
@@ -441,7 +441,7 @@ void test_vluxseg5ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2(
@@ -460,7 +460,7 @@ void test_vluxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1(
@@ -479,7 +479,7 @@ void test_vluxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1(
@@ -498,7 +498,7 @@ void test_vluxseg5ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_m(
@@ -517,7 +517,7 @@ void test_vluxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_m(
@@ -536,7 +536,7 @@ void test_vluxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m(
@@ -555,7 +555,7 @@ void test_vluxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m(
@@ -574,7 +574,7 @@ void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_m(
@@ -593,7 +593,7 @@ void test_vluxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_m(
@@ -612,7 +612,7 @@ void test_vluxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m(
@@ -631,7 +631,7 @@ void test_vluxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m(
@@ -650,7 +650,7 @@ void test_vluxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m(
@@ -669,7 +669,7 @@ void test_vluxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vluxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m(
@@ -707,7 +707,7 @@ void test_vluxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m(
@@ -726,7 +726,7 @@ void test_vluxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_m(
@@ -745,7 +745,7 @@ void test_vluxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m(
@@ -764,7 +764,7 @@ void test_vluxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_m(
@@ -783,7 +783,7 @@ void test_vluxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_m(
@@ -802,7 +802,7 @@ void test_vluxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m(
@@ -821,7 +821,7 @@ void test_vluxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m(
@@ -840,7 +840,7 @@ void test_vluxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m(
@@ -859,7 +859,7 @@ void test_vluxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_m(
@@ -878,7 +878,7 @@ void test_vluxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m(
@@ -897,7 +897,7 @@ void test_vluxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m(
@@ -916,7 +916,7 @@ void test_vluxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_m(
@@ -935,7 +935,7 @@ void test_vluxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m(
@@ -954,7 +954,7 @@ void test_vluxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_m(
@@ -973,7 +973,7 @@ void test_vluxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_m(
@@ -992,6 +992,6 @@ void test_vluxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c
index 683c3dac4dd1..b3326dcd15c0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2(
@@ -42,7 +42,7 @@ void test_vluxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1(
@@ -61,7 +61,7 @@ void test_vluxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2(
@@ -80,7 +80,7 @@ void test_vluxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1(
@@ -99,7 +99,7 @@ void test_vluxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1(
@@ -118,7 +118,7 @@ void test_vluxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8(
@@ -137,7 +137,7 @@ void test_vluxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4(
@@ -156,7 +156,7 @@ void test_vluxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2(
@@ -175,7 +175,7 @@ void test_vluxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1(
@@ -194,7 +194,7 @@ void test_vluxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4(
@@ -213,7 +213,7 @@ void test_vluxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2(
@@ -232,7 +232,7 @@ void test_vluxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1(
@@ -251,7 +251,7 @@ void test_vluxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2(
@@ -270,7 +270,7 @@ void test_vluxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1(
@@ -289,7 +289,7 @@ void test_vluxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1(
@@ -308,7 +308,7 @@ void test_vluxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8(
@@ -327,7 +327,7 @@ void test_vluxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vluxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2(
@@ -365,7 +365,7 @@ void test_vluxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1(
@@ -384,7 +384,7 @@ void test_vluxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4(
@@ -403,7 +403,7 @@ void test_vluxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2(
@@ -422,7 +422,7 @@ void test_vluxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1(
@@ -441,7 +441,7 @@ void test_vluxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2(
@@ -460,7 +460,7 @@ void test_vluxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1(
@@ -479,7 +479,7 @@ void test_vluxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1(
@@ -498,7 +498,7 @@ void test_vluxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_m(
@@ -517,7 +517,7 @@ void test_vluxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_m(
@@ -536,7 +536,7 @@ void test_vluxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m(
@@ -555,7 +555,7 @@ void test_vluxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m(
@@ -574,7 +574,7 @@ void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_m(
@@ -593,7 +593,7 @@ void test_vluxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_m(
@@ -612,7 +612,7 @@ void test_vluxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m(
@@ -631,7 +631,7 @@ void test_vluxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m(
@@ -650,7 +650,7 @@ void test_vluxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m(
@@ -669,7 +669,7 @@ void test_vluxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vluxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m(
@@ -707,7 +707,7 @@ void test_vluxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m(
@@ -726,7 +726,7 @@ void test_vluxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_m(
@@ -745,7 +745,7 @@ void test_vluxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m(
@@ -764,7 +764,7 @@ void test_vluxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_m(
@@ -783,7 +783,7 @@ void test_vluxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_m(
@@ -802,7 +802,7 @@ void test_vluxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m(
@@ -821,7 +821,7 @@ void test_vluxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m(
@@ -840,7 +840,7 @@ void test_vluxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m(
@@ -859,7 +859,7 @@ void test_vluxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_m(
@@ -878,7 +878,7 @@ void test_vluxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m(
@@ -897,7 +897,7 @@ void test_vluxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m(
@@ -916,7 +916,7 @@ void test_vluxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_m(
@@ -935,7 +935,7 @@ void test_vluxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m(
@@ -954,7 +954,7 @@ void test_vluxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_m(
@@ -973,7 +973,7 @@ void test_vluxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_m(
@@ -992,6 +992,6 @@ void test_vluxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c
index b95b47aa9d96..1aa1f214161e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2(
@@ -42,7 +42,7 @@ void test_vluxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1(
@@ -61,7 +61,7 @@ void test_vluxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2(
@@ -80,7 +80,7 @@ void test_vluxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1(
@@ -99,7 +99,7 @@ void test_vluxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1(
@@ -118,7 +118,7 @@ void test_vluxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8(
@@ -137,7 +137,7 @@ void test_vluxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4(
@@ -156,7 +156,7 @@ void test_vluxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2(
@@ -175,7 +175,7 @@ void test_vluxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1(
@@ -194,7 +194,7 @@ void test_vluxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4(
@@ -213,7 +213,7 @@ void test_vluxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2(
@@ -232,7 +232,7 @@ void test_vluxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1(
@@ -251,7 +251,7 @@ void test_vluxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2(
@@ -270,7 +270,7 @@ void test_vluxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1(
@@ -289,7 +289,7 @@ void test_vluxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1(
@@ -308,7 +308,7 @@ void test_vluxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8(
@@ -327,7 +327,7 @@ void test_vluxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vluxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2(
@@ -365,7 +365,7 @@ void test_vluxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1(
@@ -384,7 +384,7 @@ void test_vluxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4(
@@ -403,7 +403,7 @@ void test_vluxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2(
@@ -422,7 +422,7 @@ void test_vluxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1(
@@ -441,7 +441,7 @@ void test_vluxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2(
@@ -460,7 +460,7 @@ void test_vluxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1(
@@ -479,7 +479,7 @@ void test_vluxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1(
@@ -498,7 +498,7 @@ void test_vluxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_m(
@@ -517,7 +517,7 @@ void test_vluxseg5ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_m(
@@ -536,7 +536,7 @@ void test_vluxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_m(
@@ -555,7 +555,7 @@ void test_vluxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m(
@@ -574,7 +574,7 @@ void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_m(
@@ -593,7 +593,7 @@ void test_vluxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_m(
@@ -612,7 +612,7 @@ void test_vluxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m(
@@ -631,7 +631,7 @@ void test_vluxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m(
@@ -650,7 +650,7 @@ void test_vluxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m(
@@ -669,7 +669,7 @@ void test_vluxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vluxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m(
@@ -707,7 +707,7 @@ void test_vluxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m(
@@ -726,7 +726,7 @@ void test_vluxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_m(
@@ -745,7 +745,7 @@ void test_vluxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m(
@@ -764,7 +764,7 @@ void test_vluxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_m(
@@ -783,7 +783,7 @@ void test_vluxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_m(
@@ -802,7 +802,7 @@ void test_vluxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m(
@@ -821,7 +821,7 @@ void test_vluxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m(
@@ -840,7 +840,7 @@ void test_vluxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m(
@@ -859,7 +859,7 @@ void test_vluxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_m(
@@ -878,7 +878,7 @@ void test_vluxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m(
@@ -897,7 +897,7 @@ void test_vluxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m(
@@ -916,7 +916,7 @@ void test_vluxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_m(
@@ -935,7 +935,7 @@ void test_vluxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m(
@@ -954,7 +954,7 @@ void test_vluxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_m(
@@ -973,7 +973,7 @@ void test_vluxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_m(
@@ -992,6 +992,6 @@ void test_vluxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
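// A minimal caller-side sketch of the renamed overload, mirroring the i32m1
// test above. Hedged: the helper name, the strip-mining loop, and treating
// the indices as precomputed byte offsets are illustrative assumptions, not
// part of this patch; only the __riscv_vluxseg5ei64 call follows the exact
// signature these tests exercise.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void gather5_i32m1(int32_t *out0, int32_t *out1, int32_t *out2, int32_t *out3,
                   int32_t *out4, const int32_t *base,
                   const uint64_t *byte_offsets, size_t n) {
  for (size_t done = 0; done < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - done);
    // EEW=64 indices at LMUL=2 pair with EEW=32 data at LMUL=1, matching the
    // vuint64m2_t bindex parameter of test_vluxseg5ei64_v_i32m1.
    vuint64m2_t bindex = __riscv_vle64_v_u64m2(byte_offsets + done, vl);
    vint32m1_t v0, v1, v2, v3, v4;
    // Overload resolution on the argument types selects the same builtin the
    // prefixed test call lowers to.
    __riscv_vluxseg5ei64(&v0, &v1, &v2, &v3, &v4, base, bindex, vl);
    __riscv_vse32_v_i32m1(out0 + done, v0, vl);
    __riscv_vse32_v_i32m1(out1 + done, v1, vl);
    __riscv_vse32_v_i32m1(out2 + done, v2, vl);
    __riscv_vse32_v_i32m1(out3 + done, v3, vl);
    __riscv_vse32_v_i32m1(out4 + done, v4, vl);
    done += vl;
  }
}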
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c
index e463f4cf2a05..957b56c9ca8a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2(
@@ -42,7 +42,7 @@ void test_vluxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1(
@@ -61,7 +61,7 @@ void test_vluxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2(
@@ -80,7 +80,7 @@ void test_vluxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1(
@@ -99,7 +99,7 @@ void test_vluxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1(
@@ -118,7 +118,7 @@ void test_vluxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8(
@@ -137,7 +137,7 @@ void test_vluxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4(
@@ -156,7 +156,7 @@ void test_vluxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2(
@@ -175,7 +175,7 @@ void test_vluxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1(
@@ -194,7 +194,7 @@ void test_vluxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4(
@@ -213,7 +213,7 @@ void test_vluxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2(
@@ -232,7 +232,7 @@ void test_vluxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1(
@@ -251,7 +251,7 @@ void test_vluxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2(
@@ -270,7 +270,7 @@ void test_vluxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1(
@@ -289,7 +289,7 @@ void test_vluxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1(
@@ -308,7 +308,7 @@ void test_vluxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8(
@@ -327,7 +327,7 @@ void test_vluxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vluxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2(
@@ -365,7 +365,7 @@ void test_vluxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1(
@@ -384,7 +384,7 @@ void test_vluxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4(
@@ -403,7 +403,7 @@ void test_vluxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2(
@@ -422,7 +422,7 @@ void test_vluxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1(
@@ -441,7 +441,7 @@ void test_vluxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2(
@@ -460,7 +460,7 @@ void test_vluxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1(
@@ -479,7 +479,7 @@ void test_vluxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1(
@@ -498,7 +498,7 @@ void test_vluxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_m(
@@ -517,7 +517,7 @@ void test_vluxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_m(
@@ -536,7 +536,7 @@ void test_vluxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m(
@@ -555,7 +555,7 @@ void test_vluxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m(
@@ -574,7 +574,7 @@ void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_m(
@@ -593,7 +593,7 @@ void test_vluxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_m(
@@ -612,7 +612,7 @@ void test_vluxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m(
@@ -631,7 +631,7 @@ void test_vluxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m(
@@ -650,7 +650,7 @@ void test_vluxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m(
@@ -669,7 +669,7 @@ void test_vluxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vluxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m(
@@ -707,7 +707,7 @@ void test_vluxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m(
@@ -726,7 +726,7 @@ void test_vluxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_m(
@@ -745,7 +745,7 @@ void test_vluxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m(
@@ -764,7 +764,7 @@ void test_vluxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_m(
@@ -783,7 +783,7 @@ void test_vluxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_m(
@@ -802,7 +802,7 @@ void test_vluxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m(
@@ -821,7 +821,7 @@ void test_vluxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m(
@@ -840,7 +840,7 @@ void test_vluxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m(
@@ -859,7 +859,7 @@ void test_vluxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_m(
@@ -878,7 +878,7 @@ void test_vluxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m(
@@ -897,7 +897,7 @@ void test_vluxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m(
@@ -916,7 +916,7 @@ void test_vluxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_m(
@@ -935,7 +935,7 @@ void test_vluxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m(
@@ -954,7 +954,7 @@ void test_vluxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_m(
@@ -973,7 +973,7 @@ void test_vluxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_m(
@@ -992,6 +992,6 @@ void test_vluxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
+ return __riscv_vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl);
}
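// A hedged sketch of the masked overload, mirroring test_vluxseg5ei8_v_i8m1_m
// above. The mask derivation (load only where the byte offset is nonzero) and
// the single-pass assumption n <= VLMAX are illustrative, not from this patch.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void gather5_i8m1_where_nonzero(int8_t *out0, const int8_t *base,
                                const uint8_t *byte_offsets, size_t n) {
  size_t vl = __riscv_vsetvl_e8m1(n);
  vuint8m1_t bindex = __riscv_vle8_v_u8m1(byte_offsets, vl);
  // Active lanes: those whose offset is nonzero.
  vbool8_t mask = __riscv_vmsne_vx_u8m1_b8(bindex, 0, vl);
  vint8m1_t v0, v1, v2, v3, v4;
  // Same argument order as the unmasked form with the mask inserted before
  // the base pointer, as in the _m tests; in this non-policy form the
  // masked-off lanes of v0..v4 come back unspecified.
  __riscv_vluxseg5ei8(&v0, &v1, &v2, &v3, &v4, mask, base, bindex, vl);
  __riscv_vse8_v_i8m1(out0, v0, vl); // only the first field is stored here
  (void)v1; (void)v2; (void)v3; (void)v4;
}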
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c
index 396a902fc72b..b7314869df2f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2(
@@ -46,7 +46,7 @@ void test_vluxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1(
@@ -67,7 +67,7 @@ void test_vluxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2(
@@ -88,7 +88,7 @@ void test_vluxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1(
@@ -109,7 +109,7 @@ void test_vluxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1(
@@ -130,7 +130,7 @@ void test_vluxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8(
@@ -151,7 +151,7 @@ void test_vluxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4(
@@ -172,7 +172,7 @@ void test_vluxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2(
@@ -193,7 +193,7 @@ void test_vluxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1(
@@ -214,7 +214,7 @@ void test_vluxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4(
@@ -235,7 +235,7 @@ void test_vluxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2(
@@ -256,7 +256,7 @@ void test_vluxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1(
@@ -277,7 +277,7 @@ void test_vluxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2(
@@ -298,7 +298,7 @@ void test_vluxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1(
@@ -319,7 +319,7 @@ void test_vluxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1(
@@ -340,7 +340,7 @@ void test_vluxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8(
@@ -361,7 +361,7 @@ void test_vluxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4(
@@ -382,7 +382,7 @@ void test_vluxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2(
@@ -403,7 +403,7 @@ void test_vluxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1(
@@ -424,7 +424,7 @@ void test_vluxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4(
@@ -445,7 +445,7 @@ void test_vluxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2(
@@ -466,7 +466,7 @@ void test_vluxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1(
@@ -487,7 +487,7 @@ void test_vluxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2(
@@ -508,7 +508,7 @@ void test_vluxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1(
@@ -529,7 +529,7 @@ void test_vluxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1(
@@ -550,7 +550,7 @@ void test_vluxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_m(
@@ -571,7 +571,7 @@ void test_vluxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_m(
@@ -592,7 +592,7 @@ void test_vluxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m(
@@ -613,7 +613,7 @@ void test_vluxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_m(
@@ -655,7 +655,7 @@ void test_vluxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_m(
@@ -676,7 +676,7 @@ void test_vluxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m(
@@ -697,7 +697,7 @@ void test_vluxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m(
@@ -718,7 +718,7 @@ void test_vluxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vluxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_m(
@@ -760,7 +760,7 @@ void test_vluxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m(
@@ -781,7 +781,7 @@ void test_vluxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m(
@@ -802,7 +802,7 @@ void test_vluxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_m(
@@ -823,7 +823,7 @@ void test_vluxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vluxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_m(
@@ -865,7 +865,7 @@ void test_vluxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_m(
@@ -886,7 +886,7 @@ void test_vluxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m(
@@ -907,7 +907,7 @@ void test_vluxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m(
@@ -928,7 +928,7 @@ void test_vluxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vluxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_m(
@@ -970,7 +970,7 @@ void test_vluxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m(
@@ -991,7 +991,7 @@ void test_vluxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m(
@@ -1012,7 +1012,7 @@ void test_vluxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_m(
@@ -1033,7 +1033,7 @@ void test_vluxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_m(
@@ -1075,7 +1075,7 @@ void test_vluxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_m(
@@ -1096,6 +1096,6 @@ void test_vluxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
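(For context — a minimal caller sketch, not part of this patch: the overloaded form is resolved from the argument types, so existing call sites only gain the `__riscv_` prefix. Function and variable names below are illustrative; the intrinsic and its signature are the ones exercised by the tests above, assuming `<riscv_vector.h>` and a target with the V extension.)

    #include <riscv_vector.h>

    void load6_f32(const float *base, vuint16mf4_t bindex, size_t vl) {
      vfloat32mf2_t v0, v1, v2, v3, v4, v5;
      // Overload resolution selects the f32mf2/ei16 variant from the
      // destination, base, and index types; vl bounds the element count.
      __riscv_vluxseg6ei16(&v0, &v1, &v2, &v3, &v4, &v5, base, bindex, vl);
    }
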
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c
index 792e7e552bc9..f791e6adf601 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2(
@@ -46,7 +46,7 @@ void test_vluxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1(
@@ -67,7 +67,7 @@ void test_vluxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2(
@@ -88,7 +88,7 @@ void test_vluxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1(
@@ -109,7 +109,7 @@ void test_vluxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1(
@@ -130,7 +130,7 @@ void test_vluxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8(
@@ -151,7 +151,7 @@ void test_vluxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4(
@@ -172,7 +172,7 @@ void test_vluxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2(
@@ -193,7 +193,7 @@ void test_vluxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1(
@@ -214,7 +214,7 @@ void test_vluxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4(
@@ -235,7 +235,7 @@ void test_vluxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2(
@@ -256,7 +256,7 @@ void test_vluxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1(
@@ -277,7 +277,7 @@ void test_vluxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2(
@@ -298,7 +298,7 @@ void test_vluxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1(
@@ -319,7 +319,7 @@ void test_vluxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1(
@@ -340,7 +340,7 @@ void test_vluxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8(
@@ -361,7 +361,7 @@ void test_vluxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4(
@@ -382,7 +382,7 @@ void test_vluxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2(
@@ -403,7 +403,7 @@ void test_vluxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1(
@@ -424,7 +424,7 @@ void test_vluxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4(
@@ -445,7 +445,7 @@ void test_vluxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2(
@@ -466,7 +466,7 @@ void test_vluxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1(
@@ -487,7 +487,7 @@ void test_vluxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2(
@@ -508,7 +508,7 @@ void test_vluxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1(
@@ -529,7 +529,7 @@ void test_vluxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1(
@@ -550,7 +550,7 @@ void test_vluxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_m(
@@ -571,7 +571,7 @@ void test_vluxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_m(
@@ -592,7 +592,7 @@ void test_vluxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m(
@@ -613,7 +613,7 @@ void test_vluxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_m(
@@ -655,7 +655,7 @@ void test_vluxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_m(
@@ -676,7 +676,7 @@ void test_vluxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m(
@@ -697,7 +697,7 @@ void test_vluxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m(
@@ -718,7 +718,7 @@ void test_vluxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vluxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m(
@@ -760,7 +760,7 @@ void test_vluxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m(
@@ -781,7 +781,7 @@ void test_vluxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m(
@@ -802,7 +802,7 @@ void test_vluxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_m(
@@ -823,7 +823,7 @@ void test_vluxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vluxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_m(
@@ -865,7 +865,7 @@ void test_vluxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_m(
@@ -886,7 +886,7 @@ void test_vluxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m(
@@ -907,7 +907,7 @@ void test_vluxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m(
@@ -928,7 +928,7 @@ void test_vluxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vluxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m(
@@ -970,7 +970,7 @@ void test_vluxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m(
@@ -991,7 +991,7 @@ void test_vluxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m(
@@ -1012,7 +1012,7 @@ void test_vluxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_m(
@@ -1033,7 +1033,7 @@ void test_vluxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_m(
@@ -1075,7 +1075,7 @@ void test_vluxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_m(
@@ -1096,6 +1096,6 @@ void test_vluxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
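(Likewise, a hedged sketch of the masked overload tested above — illustrative names only: the `vbool*_t` mask is passed between the destination pointers and the base pointer, and the rename is again purely the `__riscv_` prefix.)

    #include <riscv_vector.h>

    void load6_i64_masked(vbool64_t mask, const int64_t *base,
                          vuint32mf2_t bindex, size_t vl) {
      vint64m1_t v0, v1, v2, v3, v4, v5;
      // Masked non-policy overload: the mask argument precedes base,
      // matching the _m test signatures in this file.
      __riscv_vluxseg6ei32(&v0, &v1, &v2, &v3, &v4, &v5, mask, base, bindex, vl);
    }
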
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c
index 59e52ab642d8..fb32c2166f6a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2(
@@ -46,7 +46,7 @@ void test_vluxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1(
@@ -67,7 +67,7 @@ void test_vluxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2(
@@ -88,7 +88,7 @@ void test_vluxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1(
@@ -109,7 +109,7 @@ void test_vluxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1(
@@ -130,7 +130,7 @@ void test_vluxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8(
@@ -151,7 +151,7 @@ void test_vluxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4(
@@ -172,7 +172,7 @@ void test_vluxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2(
@@ -193,7 +193,7 @@ void test_vluxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1(
@@ -214,7 +214,7 @@ void test_vluxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4(
@@ -235,7 +235,7 @@ void test_vluxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2(
@@ -256,7 +256,7 @@ void test_vluxseg6ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1(
@@ -277,7 +277,7 @@ void test_vluxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2(
@@ -298,7 +298,7 @@ void test_vluxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1(
@@ -319,7 +319,7 @@ void test_vluxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1(
@@ -340,7 +340,7 @@ void test_vluxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8(
@@ -361,7 +361,7 @@ void test_vluxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4(
@@ -382,7 +382,7 @@ void test_vluxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2(
@@ -403,7 +403,7 @@ void test_vluxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1(
@@ -424,7 +424,7 @@ void test_vluxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4(
@@ -445,7 +445,7 @@ void test_vluxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2(
@@ -466,7 +466,7 @@ void test_vluxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1(
@@ -487,7 +487,7 @@ void test_vluxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2(
@@ -508,7 +508,7 @@ void test_vluxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1(
@@ -529,7 +529,7 @@ void test_vluxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1(
@@ -550,7 +550,7 @@ void test_vluxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_m(
@@ -571,7 +571,7 @@ void test_vluxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_m(
@@ -592,7 +592,7 @@ void test_vluxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m(
@@ -613,7 +613,7 @@ void test_vluxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_m(
@@ -655,7 +655,7 @@ void test_vluxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_m(
@@ -676,7 +676,7 @@ void test_vluxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_m(
@@ -697,7 +697,7 @@ void test_vluxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m(
@@ -718,7 +718,7 @@ void test_vluxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vluxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_m(
@@ -760,7 +760,7 @@ void test_vluxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m(
@@ -781,7 +781,7 @@ void test_vluxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m(
@@ -802,7 +802,7 @@ void test_vluxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_m(
@@ -823,7 +823,7 @@ void test_vluxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vluxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_m(
@@ -865,7 +865,7 @@ void test_vluxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_m(
@@ -886,7 +886,7 @@ void test_vluxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m(
@@ -907,7 +907,7 @@ void test_vluxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m(
@@ -928,7 +928,7 @@ void test_vluxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vluxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_m(
@@ -970,7 +970,7 @@ void test_vluxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m(
@@ -991,7 +991,7 @@ void test_vluxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m(
@@ -1012,7 +1012,7 @@ void test_vluxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_m(
@@ -1033,7 +1033,7 @@ void test_vluxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_m(
@@ -1075,7 +1075,7 @@ void test_vluxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_m(
@@ -1096,6 +1096,6 @@ void test_vluxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
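Every hunk in these files performs the same rename: the overloaded indexed segment load keeps its argument list and only gains the __riscv_ prefix. As a minimal sketch of what a migrated caller looks like (hypothetical function name; argument types copied from the u16m1/ei64 test above; assumes a compiler that ships these prefixed overloads in <riscv_vector.h>):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical caller: loads six segment fields of u16 elements through
// 64-bit indices. Overload resolution picks the u16m1/ei64 variant from the
// argument types; only the __riscv_ prefix differs from the old spelling.
void load_six_fields_u16(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
                         vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5,
                         const uint16_t *base, vuint64m4_t bindex, size_t vl) {
  __riscv_vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}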
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c
index 853962516bb9..7f8d457f0145 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2(
@@ -46,7 +46,7 @@ void test_vluxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1(
@@ -67,7 +67,7 @@ void test_vluxseg6ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2(
@@ -88,7 +88,7 @@ void test_vluxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1(
@@ -109,7 +109,7 @@ void test_vluxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1(
@@ -130,7 +130,7 @@ void test_vluxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8(
@@ -151,7 +151,7 @@ void test_vluxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4(
@@ -172,7 +172,7 @@ void test_vluxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2(
@@ -193,7 +193,7 @@ void test_vluxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1(
@@ -214,7 +214,7 @@ void test_vluxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4(
@@ -235,7 +235,7 @@ void test_vluxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2(
@@ -256,7 +256,7 @@ void test_vluxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1(
@@ -277,7 +277,7 @@ void test_vluxseg6ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2(
@@ -298,7 +298,7 @@ void test_vluxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1(
@@ -319,7 +319,7 @@ void test_vluxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1(
@@ -340,7 +340,7 @@ void test_vluxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8(
@@ -361,7 +361,7 @@ void test_vluxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4(
@@ -382,7 +382,7 @@ void test_vluxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2(
@@ -403,7 +403,7 @@ void test_vluxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1(
@@ -424,7 +424,7 @@ void test_vluxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4(
@@ -445,7 +445,7 @@ void test_vluxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2(
@@ -466,7 +466,7 @@ void test_vluxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1(
@@ -487,7 +487,7 @@ void test_vluxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2(
@@ -508,7 +508,7 @@ void test_vluxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1(
@@ -529,7 +529,7 @@ void test_vluxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1(
@@ -550,7 +550,7 @@ void test_vluxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_m(
@@ -571,7 +571,7 @@ void test_vluxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_m(
@@ -592,7 +592,7 @@ void test_vluxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m(
@@ -613,7 +613,7 @@ void test_vluxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m(
@@ -634,7 +634,7 @@ void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_m(
@@ -655,7 +655,7 @@ void test_vluxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_m(
@@ -676,7 +676,7 @@ void test_vluxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m(
@@ -697,7 +697,7 @@ void test_vluxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m(
@@ -718,7 +718,7 @@ void test_vluxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m(
@@ -739,7 +739,7 @@ void test_vluxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_m(
@@ -760,7 +760,7 @@ void test_vluxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m(
@@ -781,7 +781,7 @@ void test_vluxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m(
@@ -802,7 +802,7 @@ void test_vluxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_m(
@@ -823,7 +823,7 @@ void test_vluxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m(
@@ -844,7 +844,7 @@ void test_vluxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_m(
@@ -865,7 +865,7 @@ void test_vluxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_m(
@@ -886,7 +886,7 @@ void test_vluxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m(
@@ -907,7 +907,7 @@ void test_vluxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m(
@@ -928,7 +928,7 @@ void test_vluxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m(
@@ -949,7 +949,7 @@ void test_vluxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_m(
@@ -970,7 +970,7 @@ void test_vluxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m(
@@ -991,7 +991,7 @@ void test_vluxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m(
@@ -1012,7 +1012,7 @@ void test_vluxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_m(
@@ -1033,7 +1033,7 @@ void test_vluxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_m(
@@ -1075,7 +1075,7 @@ void test_vluxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_m(
@@ -1096,6 +1096,6 @@ void test_vluxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
+ return __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);
}
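The _m hunks cover the other half of each overload set: the same prefixed name also accepts a vbool*_t mask placed before base, and the compiler selects the masked form from that argument alone. A hedged side-by-side sketch (hypothetical wrapper; calls and types copied from the i8mf8/ei8 tests above):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical wrapper showing that one overloaded name serves both forms;
// the vbool64_t argument inserted before base selects the masked variant.
void load_six_fields_i8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
                        vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5,
                        vbool64_t mask, const int8_t *base,
                        vuint8mf8_t bindex, size_t vl) {
  __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);        // unmasked
  __riscv_vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl);  // masked
}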
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c
index ee06b21a5b22..41e6568c6467 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2(
@@ -50,7 +50,7 @@ void test_vluxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1(
@@ -73,7 +73,7 @@ void test_vluxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2(
@@ -96,7 +96,7 @@ void test_vluxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1(
@@ -119,7 +119,7 @@ void test_vluxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1(
@@ -142,7 +142,7 @@ void test_vluxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8(
@@ -165,7 +165,7 @@ void test_vluxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4(
@@ -188,7 +188,7 @@ void test_vluxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2(
@@ -211,7 +211,7 @@ void test_vluxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1(
@@ -234,7 +234,7 @@ void test_vluxseg7ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4(
@@ -257,7 +257,7 @@ void test_vluxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2(
@@ -280,7 +280,7 @@ void test_vluxseg7ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1(
@@ -303,7 +303,7 @@ void test_vluxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2(
@@ -326,7 +326,7 @@ void test_vluxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1(
@@ -349,7 +349,7 @@ void test_vluxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1(
@@ -372,7 +372,7 @@ void test_vluxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vluxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4(
@@ -418,7 +418,7 @@ void test_vluxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2(
@@ -441,7 +441,7 @@ void test_vluxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1(
@@ -464,7 +464,7 @@ void test_vluxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4(
@@ -487,7 +487,7 @@ void test_vluxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2(
@@ -510,7 +510,7 @@ void test_vluxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1(
@@ -533,7 +533,7 @@ void test_vluxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2(
@@ -556,7 +556,7 @@ void test_vluxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1(
@@ -579,7 +579,7 @@ void test_vluxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1(
@@ -602,7 +602,7 @@ void test_vluxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_m(
@@ -625,7 +625,7 @@ void test_vluxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_m(
@@ -648,7 +648,7 @@ void test_vluxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m(
@@ -671,7 +671,7 @@ void test_vluxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m(
@@ -694,7 +694,7 @@ void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_m(
@@ -717,7 +717,7 @@ void test_vluxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_m(
@@ -740,7 +740,7 @@ void test_vluxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m(
@@ -763,7 +763,7 @@ void test_vluxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vluxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m(
@@ -809,7 +809,7 @@ void test_vluxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_m(
@@ -832,7 +832,7 @@ void test_vluxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m(
@@ -855,7 +855,7 @@ void test_vluxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m(
@@ -878,7 +878,7 @@ void test_vluxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vluxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m(
@@ -924,7 +924,7 @@ void test_vluxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_m(
@@ -947,7 +947,7 @@ void test_vluxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_m(
@@ -970,7 +970,7 @@ void test_vluxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m(
@@ -993,7 +993,7 @@ void test_vluxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m(
@@ -1016,7 +1016,7 @@ void test_vluxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m(
@@ -1039,7 +1039,7 @@ void test_vluxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_m(
@@ -1062,7 +1062,7 @@ void test_vluxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m(
@@ -1085,7 +1085,7 @@ void test_vluxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m(
@@ -1108,7 +1108,7 @@ void test_vluxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_m(
@@ -1131,7 +1131,7 @@ void test_vluxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m(
@@ -1154,7 +1154,7 @@ void test_vluxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_m(
@@ -1177,7 +1177,7 @@ void test_vluxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_m(
@@ -1200,6 +1200,6 @@ void test_vluxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c
index cca45a49b722..f2d70ddfb3de 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2(
@@ -50,7 +50,7 @@ void test_vluxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1(
@@ -73,7 +73,7 @@ void test_vluxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2(
@@ -96,7 +96,7 @@ void test_vluxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1(
@@ -119,7 +119,7 @@ void test_vluxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1(
@@ -142,7 +142,7 @@ void test_vluxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8(
@@ -165,7 +165,7 @@ void test_vluxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4(
@@ -188,7 +188,7 @@ void test_vluxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2(
@@ -211,7 +211,7 @@ void test_vluxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1(
@@ -234,7 +234,7 @@ void test_vluxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4(
@@ -257,7 +257,7 @@ void test_vluxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2(
@@ -280,7 +280,7 @@ void test_vluxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1(
@@ -303,7 +303,7 @@ void test_vluxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2(
@@ -326,7 +326,7 @@ void test_vluxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1(
@@ -349,7 +349,7 @@ void test_vluxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1(
@@ -372,7 +372,7 @@ void test_vluxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vluxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4(
@@ -418,7 +418,7 @@ void test_vluxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2(
@@ -441,7 +441,7 @@ void test_vluxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1(
@@ -464,7 +464,7 @@ void test_vluxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4(
@@ -487,7 +487,7 @@ void test_vluxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2(
@@ -510,7 +510,7 @@ void test_vluxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1(
@@ -533,7 +533,7 @@ void test_vluxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2(
@@ -556,7 +556,7 @@ void test_vluxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1(
@@ -579,7 +579,7 @@ void test_vluxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1(
@@ -602,7 +602,7 @@ void test_vluxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_m(
@@ -625,7 +625,7 @@ void test_vluxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_m(
@@ -648,7 +648,7 @@ void test_vluxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m(
@@ -671,7 +671,7 @@ void test_vluxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m(
@@ -694,7 +694,7 @@ void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_m(
@@ -717,7 +717,7 @@ void test_vluxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_m(
@@ -740,7 +740,7 @@ void test_vluxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m(
@@ -763,7 +763,7 @@ void test_vluxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vluxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m(
@@ -809,7 +809,7 @@ void test_vluxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_m(
@@ -832,7 +832,7 @@ void test_vluxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m(
@@ -855,7 +855,7 @@ void test_vluxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m(
@@ -878,7 +878,7 @@ void test_vluxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vluxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m(
@@ -924,7 +924,7 @@ void test_vluxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_m(
@@ -947,7 +947,7 @@ void test_vluxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_m(
@@ -970,7 +970,7 @@ void test_vluxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m(
@@ -993,7 +993,7 @@ void test_vluxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m(
@@ -1016,7 +1016,7 @@ void test_vluxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m(
@@ -1039,7 +1039,7 @@ void test_vluxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m(
@@ -1062,7 +1062,7 @@ void test_vluxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m(
@@ -1085,7 +1085,7 @@ void test_vluxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m(
@@ -1108,7 +1108,7 @@ void test_vluxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_m(
@@ -1131,7 +1131,7 @@ void test_vluxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m(
@@ -1154,7 +1154,7 @@ void test_vluxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_m(
@@ -1177,7 +1177,7 @@ void test_vluxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_m(
@@ -1200,6 +1200,6 @@ void test_vluxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c
index d343409c0c4c..0905d9a6ef3a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2(
@@ -50,7 +50,7 @@ void test_vluxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1(
@@ -73,7 +73,7 @@ void test_vluxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2(
@@ -96,7 +96,7 @@ void test_vluxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1(
@@ -119,7 +119,7 @@ void test_vluxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1(
@@ -142,7 +142,7 @@ void test_vluxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8(
@@ -165,7 +165,7 @@ void test_vluxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4(
@@ -188,7 +188,7 @@ void test_vluxseg7ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2(
@@ -211,7 +211,7 @@ void test_vluxseg7ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1(
@@ -234,7 +234,7 @@ void test_vluxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4(
@@ -257,7 +257,7 @@ void test_vluxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2(
@@ -280,7 +280,7 @@ void test_vluxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1(
@@ -303,7 +303,7 @@ void test_vluxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2(
@@ -326,7 +326,7 @@ void test_vluxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1(
@@ -349,7 +349,7 @@ void test_vluxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1(
@@ -372,7 +372,7 @@ void test_vluxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vluxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4(
@@ -418,7 +418,7 @@ void test_vluxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2(
@@ -441,7 +441,7 @@ void test_vluxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1(
@@ -464,7 +464,7 @@ void test_vluxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4(
@@ -487,7 +487,7 @@ void test_vluxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2(
@@ -510,7 +510,7 @@ void test_vluxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1(
@@ -533,7 +533,7 @@ void test_vluxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2(
@@ -556,7 +556,7 @@ void test_vluxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1(
@@ -579,7 +579,7 @@ void test_vluxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1(
@@ -602,7 +602,7 @@ void test_vluxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_m(
@@ -625,7 +625,7 @@ void test_vluxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_m(
@@ -648,7 +648,7 @@ void test_vluxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m(
@@ -671,7 +671,7 @@ void test_vluxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m(
@@ -694,7 +694,7 @@ void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_m(
@@ -717,7 +717,7 @@ void test_vluxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_m(
@@ -740,7 +740,7 @@ void test_vluxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m(
@@ -763,7 +763,7 @@ void test_vluxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vluxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m(
@@ -809,7 +809,7 @@ void test_vluxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_m(
@@ -832,7 +832,7 @@ void test_vluxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m(
@@ -855,7 +855,7 @@ void test_vluxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m(
@@ -878,7 +878,7 @@ void test_vluxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vluxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m(
@@ -924,7 +924,7 @@ void test_vluxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_m(
@@ -947,7 +947,7 @@ void test_vluxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_m(
@@ -970,7 +970,7 @@ void test_vluxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m(
@@ -993,7 +993,7 @@ void test_vluxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m(
@@ -1016,7 +1016,7 @@ void test_vluxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m(
@@ -1039,7 +1039,7 @@ void test_vluxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_m(
@@ -1062,7 +1062,7 @@ void test_vluxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m(
@@ -1085,7 +1085,7 @@ void test_vluxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m(
@@ -1108,7 +1108,7 @@ void test_vluxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_m(
@@ -1131,7 +1131,7 @@ void test_vluxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m(
@@ -1154,7 +1154,7 @@ void test_vluxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_m(
@@ -1177,7 +1177,7 @@ void test_vluxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_m(
@@ -1200,6 +1200,6 @@ void test_vluxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
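The hunks above show the rename only at the autogenerated call sites. As a minimal sketch of what the renamed non-masked overload looks like from user code (assuming a toolchain whose <riscv_vector.h> declares these v0.11-style overloaded segment loads, as the tests do; the caller name load7_u64 is illustrative):

#include <riscv_vector.h>

// Illustrative sketch, not part of the patch: calls the renamed overload
// directly, mirroring test_vluxseg7ei64_v_u64m1 above. Each of the seven
// output pointers receives one field of the 7-field segments gathered
// from base at the byte offsets in bindex, for vl elements.
void load7_u64(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
  vuint64m1_t v0, v1, v2, v3, v4, v5, v6;
  __riscv_vluxseg7ei64(&v0, &v1, &v2, &v3, &v4, &v5, &v6,
                       base, bindex, vl);
}

Overload resolution still picks the ei64/u64m1 variant from the argument types, so only the __riscv_ prefix distinguishes this from the pre-patch spelling.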
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c
index 337a76909949..ba78a16e10c3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2(
@@ -50,7 +50,7 @@ void test_vluxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1(
@@ -73,7 +73,7 @@ void test_vluxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2(
@@ -96,7 +96,7 @@ void test_vluxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1(
@@ -119,7 +119,7 @@ void test_vluxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1(
@@ -142,7 +142,7 @@ void test_vluxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8(
@@ -165,7 +165,7 @@ void test_vluxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4(
@@ -188,7 +188,7 @@ void test_vluxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2(
@@ -211,7 +211,7 @@ void test_vluxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1(
@@ -234,7 +234,7 @@ void test_vluxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4(
@@ -257,7 +257,7 @@ void test_vluxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2(
@@ -280,7 +280,7 @@ void test_vluxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1(
@@ -303,7 +303,7 @@ void test_vluxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2(
@@ -326,7 +326,7 @@ void test_vluxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1(
@@ -349,7 +349,7 @@ void test_vluxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1(
@@ -372,7 +372,7 @@ void test_vluxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8(
@@ -395,7 +395,7 @@ void test_vluxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4(
@@ -418,7 +418,7 @@ void test_vluxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2(
@@ -441,7 +441,7 @@ void test_vluxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1(
@@ -464,7 +464,7 @@ void test_vluxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4(
@@ -487,7 +487,7 @@ void test_vluxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2(
@@ -510,7 +510,7 @@ void test_vluxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1(
@@ -533,7 +533,7 @@ void test_vluxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2(
@@ -556,7 +556,7 @@ void test_vluxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1(
@@ -579,7 +579,7 @@ void test_vluxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1(
@@ -602,7 +602,7 @@ void test_vluxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_m(
@@ -625,7 +625,7 @@ void test_vluxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_m(
@@ -648,7 +648,7 @@ void test_vluxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m(
@@ -671,7 +671,7 @@ void test_vluxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m(
@@ -694,7 +694,7 @@ void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_m(
@@ -717,7 +717,7 @@ void test_vluxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_m(
@@ -740,7 +740,7 @@ void test_vluxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m(
@@ -763,7 +763,7 @@ void test_vluxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m(
@@ -786,7 +786,7 @@ void test_vluxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m(
@@ -809,7 +809,7 @@ void test_vluxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_m(
@@ -832,7 +832,7 @@ void test_vluxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m(
@@ -855,7 +855,7 @@ void test_vluxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m(
@@ -878,7 +878,7 @@ void test_vluxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_m(
@@ -901,7 +901,7 @@ void test_vluxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m(
@@ -924,7 +924,7 @@ void test_vluxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_m(
@@ -947,7 +947,7 @@ void test_vluxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_m(
@@ -970,7 +970,7 @@ void test_vluxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m(
@@ -993,7 +993,7 @@ void test_vluxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m(
@@ -1016,7 +1016,7 @@ void test_vluxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m(
@@ -1039,7 +1039,7 @@ void test_vluxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_m(
@@ -1062,7 +1062,7 @@ void test_vluxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m(
@@ -1085,7 +1085,7 @@ void test_vluxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m(
@@ -1108,7 +1108,7 @@ void test_vluxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_m(
@@ -1131,7 +1131,7 @@ void test_vluxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m(
@@ -1154,7 +1154,7 @@ void test_vluxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_m(
@@ -1177,7 +1177,7 @@ void test_vluxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_m(
@@ -1200,6 +1200,6 @@ void test_vluxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
+ return __riscv_vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl);
}
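The masked overloads differ only in the extra vbool mask operand placed before base, as in test_vluxseg7ei8_v_u64m1_m above. A corresponding sketch (again illustrative, with a hypothetical caller name):

#include <riscv_vector.h>

// Illustrative sketch, not part of the patch: the masked overload is
// selected by the vbool64_t argument; only elements whose mask bit is
// set are loaded.
void load7_u64_m(vbool64_t mask, const uint64_t *base,
                 vuint8mf8_t bindex, size_t vl) {
  vuint64m1_t v0, v1, v2, v3, v4, v5, v6;
  __riscv_vluxseg7ei8(&v0, &v1, &v2, &v3, &v4, &v5, &v6,
                      mask, base, bindex, vl);
}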
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c
index 9fa2c9c420e8..2d9a3cb1a1ea 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2(
@@ -54,7 +54,7 @@ void test_vluxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1(
@@ -79,7 +79,7 @@ void test_vluxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2(
@@ -104,7 +104,7 @@ void test_vluxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1(
@@ -129,7 +129,7 @@ void test_vluxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1(
@@ -154,7 +154,7 @@ void test_vluxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8(
@@ -179,7 +179,7 @@ void test_vluxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4(
@@ -204,7 +204,7 @@ void test_vluxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2(
@@ -229,7 +229,7 @@ void test_vluxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1(
@@ -254,7 +254,7 @@ void test_vluxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4(
@@ -279,7 +279,7 @@ void test_vluxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2(
@@ -304,7 +304,7 @@ void test_vluxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1(
@@ -329,7 +329,7 @@ void test_vluxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2(
@@ -354,7 +354,7 @@ void test_vluxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1(
@@ -379,7 +379,7 @@ void test_vluxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1(
@@ -404,7 +404,7 @@ void test_vluxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8(
@@ -429,7 +429,7 @@ void test_vluxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4(
@@ -454,7 +454,7 @@ void test_vluxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2(
@@ -479,7 +479,7 @@ void test_vluxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1(
@@ -504,7 +504,7 @@ void test_vluxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4(
@@ -529,7 +529,7 @@ void test_vluxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2(
@@ -554,7 +554,7 @@ void test_vluxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1(
@@ -579,7 +579,7 @@ void test_vluxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2(
@@ -604,7 +604,7 @@ void test_vluxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1(
@@ -629,7 +629,7 @@ void test_vluxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1(
@@ -654,7 +654,7 @@ void test_vluxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_m(
@@ -679,7 +679,7 @@ void test_vluxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_m(
@@ -704,7 +704,7 @@ void test_vluxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m(
@@ -729,7 +729,7 @@ void test_vluxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m(
@@ -754,7 +754,7 @@ void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_m(
@@ -779,7 +779,7 @@ void test_vluxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_m(
@@ -804,7 +804,7 @@ void test_vluxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m(
@@ -829,7 +829,7 @@ void test_vluxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m(
@@ -854,7 +854,7 @@ void test_vluxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m(
@@ -879,7 +879,7 @@ void test_vluxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_m(
@@ -904,7 +904,7 @@ void test_vluxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m(
@@ -929,7 +929,7 @@ void test_vluxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m(
@@ -954,7 +954,7 @@ void test_vluxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_m(
@@ -979,7 +979,7 @@ void test_vluxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m(
@@ -1004,7 +1004,7 @@ void test_vluxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_m(
@@ -1029,7 +1029,7 @@ void test_vluxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m(
@@ -1079,7 +1079,7 @@ void test_vluxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m(
@@ -1104,7 +1104,7 @@ void test_vluxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m(
@@ -1129,7 +1129,7 @@ void test_vluxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_m(
@@ -1154,7 +1154,7 @@ void test_vluxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m(
@@ -1179,7 +1179,7 @@ void test_vluxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m(
@@ -1204,7 +1204,7 @@ void test_vluxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_m(
@@ -1229,7 +1229,7 @@ void test_vluxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m(
@@ -1254,7 +1254,7 @@ void test_vluxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_m(
@@ -1279,7 +1279,7 @@ void test_vluxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_m(
@@ -1304,6 +1304,6 @@ void test_vluxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
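The hunks above are purely mechanical: every overloaded call site gains the `__riscv_` prefix while the argument list stays untouched, so the indexed segment loads keep inferring their variant from the operand types. A minimal sketch of a migrated caller (hypothetical function and variable names; only the intrinsic call shape is taken from the tests above):

#include <riscv_vector.h>

// Hypothetical caller: gathers 8 interleaved 16-bit fields through an
// EEW=16 indexed segment load, one field per output register.
void load_fields(const uint16_t *base, vuint16m1_t bindex, size_t vl,
                 vuint16m1_t out[8]) {
  // Previously spelled vluxseg8ei16(...); the rename adds only the prefix.
  __riscv_vluxseg8ei16(&out[0], &out[1], &out[2], &out[3],
                       &out[4], &out[5], &out[6], &out[7],
                       base, bindex, vl);
}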
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c
index 9d61dcf55ff6..bd3690942178 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2(
@@ -54,7 +54,7 @@ void test_vluxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1(
@@ -79,7 +79,7 @@ void test_vluxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2(
@@ -104,7 +104,7 @@ void test_vluxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1(
@@ -129,7 +129,7 @@ void test_vluxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1(
@@ -154,7 +154,7 @@ void test_vluxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8(
@@ -179,7 +179,7 @@ void test_vluxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4(
@@ -204,7 +204,7 @@ void test_vluxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2(
@@ -229,7 +229,7 @@ void test_vluxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1(
@@ -254,7 +254,7 @@ void test_vluxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4(
@@ -279,7 +279,7 @@ void test_vluxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2(
@@ -304,7 +304,7 @@ void test_vluxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1(
@@ -329,7 +329,7 @@ void test_vluxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2(
@@ -354,7 +354,7 @@ void test_vluxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1(
@@ -379,7 +379,7 @@ void test_vluxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1(
@@ -404,7 +404,7 @@ void test_vluxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8(
@@ -429,7 +429,7 @@ void test_vluxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4(
@@ -454,7 +454,7 @@ void test_vluxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2(
@@ -479,7 +479,7 @@ void test_vluxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1(
@@ -504,7 +504,7 @@ void test_vluxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4(
@@ -529,7 +529,7 @@ void test_vluxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2(
@@ -554,7 +554,7 @@ void test_vluxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1(
@@ -579,7 +579,7 @@ void test_vluxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2(
@@ -604,7 +604,7 @@ void test_vluxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1(
@@ -629,7 +629,7 @@ void test_vluxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1(
@@ -654,7 +654,7 @@ void test_vluxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_m(
@@ -679,7 +679,7 @@ void test_vluxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_m(
@@ -704,7 +704,7 @@ void test_vluxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m(
@@ -729,7 +729,7 @@ void test_vluxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m(
@@ -754,7 +754,7 @@ void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_m(
@@ -779,7 +779,7 @@ void test_vluxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_m(
@@ -804,7 +804,7 @@ void test_vluxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m(
@@ -829,7 +829,7 @@ void test_vluxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m(
@@ -854,7 +854,7 @@ void test_vluxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m(
@@ -879,7 +879,7 @@ void test_vluxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_m(
@@ -904,7 +904,7 @@ void test_vluxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m(
@@ -929,7 +929,7 @@ void test_vluxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m(
@@ -954,7 +954,7 @@ void test_vluxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_m(
@@ -979,7 +979,7 @@ void test_vluxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m(
@@ -1004,7 +1004,7 @@ void test_vluxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_m(
@@ -1029,7 +1029,7 @@ void test_vluxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m(
@@ -1079,7 +1079,7 @@ void test_vluxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m(
@@ -1104,7 +1104,7 @@ void test_vluxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m(
@@ -1129,7 +1129,7 @@ void test_vluxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m(
@@ -1154,7 +1154,7 @@ void test_vluxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m(
@@ -1179,7 +1179,7 @@ void test_vluxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m(
@@ -1204,7 +1204,7 @@ void test_vluxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_m(
@@ -1229,7 +1229,7 @@ void test_vluxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m(
@@ -1254,7 +1254,7 @@ void test_vluxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_m(
@@ -1279,7 +1279,7 @@ void test_vluxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_m(
@@ -1304,6 +1304,6 @@ void test_vluxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
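The masked hunks in this file follow the same pattern; the only structural difference is the vbool mask argument placed ahead of base, which is what steers overload resolution to the _m flavor. A hedged sketch of a masked caller (names are illustrative, not from the test file):

#include <riscv_vector.h>

// Hypothetical masked caller: the vbool32_t mask argument before base
// selects the masked (_m) overload of the same prefixed intrinsic.
void load_fields_masked(vbool32_t mask, const uint32_t *base,
                        vuint32m1_t bindex, size_t vl, vuint32m1_t out[8]) {
  __riscv_vluxseg8ei32(&out[0], &out[1], &out[2], &out[3],
                       &out[4], &out[5], &out[6], &out[7],
                       mask, base, bindex, vl);
}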
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c
index 1b72bdd018d1..f73f0b28e567 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2(
@@ -54,7 +54,7 @@ void test_vluxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1(
@@ -79,7 +79,7 @@ void test_vluxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2(
@@ -104,7 +104,7 @@ void test_vluxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1(
@@ -129,7 +129,7 @@ void test_vluxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1(
@@ -154,7 +154,7 @@ void test_vluxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8(
@@ -179,7 +179,7 @@ void test_vluxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4(
@@ -204,7 +204,7 @@ void test_vluxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2(
@@ -229,7 +229,7 @@ void test_vluxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1(
@@ -254,7 +254,7 @@ void test_vluxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4(
@@ -279,7 +279,7 @@ void test_vluxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2(
@@ -304,7 +304,7 @@ void test_vluxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1(
@@ -329,7 +329,7 @@ void test_vluxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2(
@@ -354,7 +354,7 @@ void test_vluxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1(
@@ -379,7 +379,7 @@ void test_vluxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1(
@@ -404,7 +404,7 @@ void test_vluxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8(
@@ -429,7 +429,7 @@ void test_vluxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4(
@@ -454,7 +454,7 @@ void test_vluxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2(
@@ -479,7 +479,7 @@ void test_vluxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1(
@@ -504,7 +504,7 @@ void test_vluxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4(
@@ -529,7 +529,7 @@ void test_vluxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2(
@@ -554,7 +554,7 @@ void test_vluxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1(
@@ -579,7 +579,7 @@ void test_vluxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2(
@@ -604,7 +604,7 @@ void test_vluxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1(
@@ -629,7 +629,7 @@ void test_vluxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1(
@@ -654,7 +654,7 @@ void test_vluxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_m(
@@ -679,7 +679,7 @@ void test_vluxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_m(
@@ -704,7 +704,7 @@ void test_vluxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m(
@@ -729,7 +729,7 @@ void test_vluxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m(
@@ -754,7 +754,7 @@ void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_m(
@@ -779,7 +779,7 @@ void test_vluxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_m(
@@ -804,7 +804,7 @@ void test_vluxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m(
@@ -829,7 +829,7 @@ void test_vluxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m(
@@ -854,7 +854,7 @@ void test_vluxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m(
@@ -879,7 +879,7 @@ void test_vluxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_m(
@@ -904,7 +904,7 @@ void test_vluxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m(
@@ -929,7 +929,7 @@ void test_vluxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m(
@@ -954,7 +954,7 @@ void test_vluxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_m(
@@ -979,7 +979,7 @@ void test_vluxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m(
@@ -1004,7 +1004,7 @@ void test_vluxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_m(
@@ -1029,7 +1029,7 @@ void test_vluxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m(
@@ -1079,7 +1079,7 @@ void test_vluxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m(
@@ -1104,7 +1104,7 @@ void test_vluxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m(
@@ -1129,7 +1129,7 @@ void test_vluxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_m(
@@ -1154,7 +1154,7 @@ void test_vluxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m(
@@ -1179,7 +1179,7 @@ void test_vluxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m(
@@ -1204,7 +1204,7 @@ void test_vluxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_m(
@@ -1229,7 +1229,7 @@ void test_vluxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m(
@@ -1254,7 +1254,7 @@ void test_vluxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_m(
@@ -1279,7 +1279,7 @@ void test_vluxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_m(
@@ -1304,6 +1304,6 @@ void test_vluxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
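The change above is purely lexical: each overloaded indexed-segment-load call keeps its argument list and only gains the __riscv_ prefix. A minimal migration sketch, not taken from the patch itself, mirroring the test_vluxseg8ei64_v_i32m1 signature above; the helper name gather_fields is an illustrative assumption:

#include <riscv_vector.h>

/* Loads 8 interleaved int32 fields through 64-bit indices. Under this
 * patch only the intrinsic name changes; the eight output pointers,
 * base pointer, index vector, and vl are passed exactly as before. */
void gather_fields(vint32m1_t *f0, vint32m1_t *f1, vint32m1_t *f2,
                   vint32m1_t *f3, vint32m1_t *f4, vint32m1_t *f5,
                   vint32m1_t *f6, vint32m1_t *f7, const int32_t *base,
                   vuint64m2_t bindex, size_t vl) {
  /* before this patch: vluxseg8ei64(f0, ..., base, bindex, vl); */
  __riscv_vluxseg8ei64(f0, f1, f2, f3, f4, f5, f6, f7, base, bindex, vl);
}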
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c
index 3ea8b753e76c..b4c0283bcd30 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2(
@@ -54,7 +54,7 @@ void test_vluxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1(
@@ -79,7 +79,7 @@ void test_vluxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2(
@@ -104,7 +104,7 @@ void test_vluxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1(
@@ -129,7 +129,7 @@ void test_vluxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1(
@@ -154,7 +154,7 @@ void test_vluxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8(
@@ -179,7 +179,7 @@ void test_vluxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4(
@@ -204,7 +204,7 @@ void test_vluxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2(
@@ -229,7 +229,7 @@ void test_vluxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1(
@@ -254,7 +254,7 @@ void test_vluxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4(
@@ -279,7 +279,7 @@ void test_vluxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2(
@@ -304,7 +304,7 @@ void test_vluxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1(
@@ -329,7 +329,7 @@ void test_vluxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2(
@@ -354,7 +354,7 @@ void test_vluxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1(
@@ -379,7 +379,7 @@ void test_vluxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1(
@@ -404,7 +404,7 @@ void test_vluxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8(
@@ -429,7 +429,7 @@ void test_vluxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4(
@@ -454,7 +454,7 @@ void test_vluxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2(
@@ -479,7 +479,7 @@ void test_vluxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1(
@@ -504,7 +504,7 @@ void test_vluxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4(
@@ -529,7 +529,7 @@ void test_vluxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2(
@@ -554,7 +554,7 @@ void test_vluxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1(
@@ -579,7 +579,7 @@ void test_vluxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2(
@@ -604,7 +604,7 @@ void test_vluxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1(
@@ -629,7 +629,7 @@ void test_vluxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1(
@@ -654,7 +654,7 @@ void test_vluxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_m(
@@ -679,7 +679,7 @@ void test_vluxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_m(
@@ -704,7 +704,7 @@ void test_vluxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m(
@@ -729,7 +729,7 @@ void test_vluxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m(
@@ -754,7 +754,7 @@ void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_m(
@@ -779,7 +779,7 @@ void test_vluxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_m(
@@ -804,7 +804,7 @@ void test_vluxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m(
@@ -829,7 +829,7 @@ void test_vluxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m(
@@ -854,7 +854,7 @@ void test_vluxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m(
@@ -879,7 +879,7 @@ void test_vluxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_m(
@@ -904,7 +904,7 @@ void test_vluxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m(
@@ -929,7 +929,7 @@ void test_vluxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m(
@@ -954,7 +954,7 @@ void test_vluxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_m(
@@ -979,7 +979,7 @@ void test_vluxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m(
@@ -1004,7 +1004,7 @@ void test_vluxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_m(
@@ -1029,7 +1029,7 @@ void test_vluxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_m(
@@ -1054,7 +1054,7 @@ void test_vluxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m(
@@ -1079,7 +1079,7 @@ void test_vluxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m(
@@ -1104,7 +1104,7 @@ void test_vluxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m(
@@ -1129,7 +1129,7 @@ void test_vluxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_m(
@@ -1154,7 +1154,7 @@ void test_vluxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m(
@@ -1179,7 +1179,7 @@ void test_vluxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m(
@@ -1204,7 +1204,7 @@ void test_vluxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_m(
@@ -1229,7 +1229,7 @@ void test_vluxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m(
@@ -1254,7 +1254,7 @@ void test_vluxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_m(
@@ -1279,7 +1279,7 @@ void test_vluxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_m(
@@ -1304,6 +1304,6 @@ void test_vluxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
+ return __riscv_vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
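The masked (_m) overloads renamed in this file follow the same pattern: the mask slots in ahead of the base pointer, and overload resolution is otherwise unchanged. A minimal sketch mirroring test_vluxseg8ei8_v_u8m1_m above; the helper name gather_fields_m is an illustrative assumption:

#include <riscv_vector.h>

/* Masked 8-field gather with 8-bit indices; inactive elements follow
 * the default policy of the non-policy overload. Only the __riscv_
 * prefix is new relative to the old spelling. */
void gather_fields_m(vuint8m1_t *f0, vuint8m1_t *f1, vuint8m1_t *f2,
                     vuint8m1_t *f3, vuint8m1_t *f4, vuint8m1_t *f5,
                     vuint8m1_t *f6, vuint8m1_t *f7, vbool8_t mask,
                     const uint8_t *base, vuint8m1_t bindex, size_t vl) {
  __riscv_vluxseg8ei8(f0, f1, f2, f3, f4, f5, f6, f7, mask, base, bindex, vl);
}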
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c
index 20d6875edb37..2d85a88946ec 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8(
@@ -22,7 +22,7 @@ vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
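The vmacc tests in this file exercise both the vector-vector (vv) and vector-scalar (vx) overloads; the rename is the same single token in each. A minimal sketch of the renamed call, with the assumed helper name fma_acc; vmacc computes vd[i] = vs1[i] * vs2[i] + vd[i]:

#include <riscv_vector.h>

/* Elementwise acc += a * b. The accumulator is both input and output,
 * exactly as in the autogenerated tests in this file. */
vint32m1_t fma_acc(vint32m1_t acc, vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vmacc(acc, a, b, vl);
}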
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4(
@@ -31,7 +31,7 @@ vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4(
@@ -40,7 +40,7 @@ vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2(
@@ -49,7 +49,7 @@ vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2(
@@ -58,7 +58,7 @@ vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1(
@@ -67,7 +67,7 @@ vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1(
@@ -76,7 +76,7 @@ vint8m1_t test_vmacc_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2(
@@ -85,7 +85,7 @@ vint8m1_t test_vmacc_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2(
@@ -94,7 +94,7 @@ vint8m2_t test_vmacc_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4(
@@ -103,7 +103,7 @@ vint8m2_t test_vmacc_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4(
@@ -112,7 +112,7 @@ vint8m4_t test_vmacc_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
@@ -121,7 +121,7 @@ vint8m4_t test_vmacc_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
@@ -130,7 +130,7 @@ vint8m8_t test_vmacc_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
@@ -139,7 +139,7 @@ vint8m8_t test_vmacc_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
@@ -148,7 +148,7 @@ vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
@@ -157,7 +157,7 @@ vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
@@ -166,7 +166,7 @@ vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
@@ -175,7 +175,7 @@ vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
@@ -184,7 +184,7 @@ vint16m1_t test_vmacc_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
@@ -193,7 +193,7 @@ vint16m1_t test_vmacc_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
@@ -202,7 +202,7 @@ vint16m2_t test_vmacc_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
@@ -211,7 +211,7 @@ vint16m2_t test_vmacc_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
@@ -220,7 +220,7 @@ vint16m4_t test_vmacc_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
@@ -229,7 +229,7 @@ vint16m4_t test_vmacc_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
@@ -238,7 +238,7 @@ vint16m8_t test_vmacc_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
@@ -247,7 +247,7 @@ vint16m8_t test_vmacc_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
@@ -256,7 +256,7 @@ vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
@@ -265,7 +265,7 @@ vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
@@ -274,7 +274,7 @@ vint32m1_t test_vmacc_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vmacc_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
@@ -292,7 +292,7 @@ vint32m2_t test_vmacc_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
@@ -301,7 +301,7 @@ vint32m2_t test_vmacc_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
@@ -310,7 +310,7 @@ vint32m4_t test_vmacc_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
@@ -319,7 +319,7 @@ vint32m4_t test_vmacc_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
@@ -328,7 +328,7 @@ vint32m8_t test_vmacc_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
@@ -337,7 +337,7 @@ vint32m8_t test_vmacc_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
@@ -346,7 +346,7 @@ vint64m1_t test_vmacc_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
@@ -355,7 +355,7 @@ vint64m1_t test_vmacc_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
@@ -364,7 +364,7 @@ vint64m2_t test_vmacc_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
@@ -373,7 +373,7 @@ vint64m2_t test_vmacc_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
@@ -382,7 +382,7 @@ vint64m4_t test_vmacc_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
@@ -391,7 +391,7 @@ vint64m4_t test_vmacc_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
@@ -400,7 +400,7 @@ vint64m8_t test_vmacc_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
@@ -409,7 +409,7 @@ vint64m8_t test_vmacc_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
@@ -472,7 +472,7 @@ vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
@@ -481,7 +481,7 @@ vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
@@ -490,7 +490,7 @@ vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
@@ -499,7 +499,7 @@ vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
@@ -508,7 +508,7 @@ vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
@@ -517,7 +517,7 @@ vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
@@ -526,7 +526,7 @@ vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
@@ -535,7 +535,7 @@ vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
@@ -580,7 +580,7 @@ vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
@@ -589,7 +589,7 @@ vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
@@ -598,7 +598,7 @@ vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
@@ -607,7 +607,7 @@ vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
@@ -616,7 +616,7 @@ vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
@@ -625,7 +625,7 @@ vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
@@ -634,7 +634,7 @@ vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
@@ -643,7 +643,7 @@ vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
@@ -670,7 +670,7 @@ vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
@@ -679,7 +679,7 @@ vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
@@ -688,7 +688,7 @@ vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
@@ -697,7 +697,7 @@ vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
@@ -706,7 +706,7 @@ vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
@@ -715,7 +715,7 @@ vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
@@ -724,7 +724,7 @@ vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
@@ -733,7 +733,7 @@ vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
@@ -742,7 +742,7 @@ vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
@@ -751,7 +751,7 @@ vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
@@ -760,7 +760,7 @@ vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
@@ -769,7 +769,7 @@ vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
@@ -778,7 +778,7 @@ vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
@@ -787,7 +787,7 @@ vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmacc(vd, vs1, vs2, vl);
+ return __riscv_vmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
@@ -796,7 +796,7 @@ vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmacc(vd, rs1, vs2, vl);
+ return __riscv_vmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
@@ -805,7 +805,7 @@ vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
@@ -814,7 +814,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
@@ -823,7 +823,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
@@ -832,7 +832,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
@@ -841,7 +841,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
@@ -850,7 +850,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
@@ -859,7 +859,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
@@ -868,7 +868,7 @@ vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
@@ -877,7 +877,7 @@ vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
@@ -886,7 +886,7 @@ vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
@@ -895,7 +895,7 @@ vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
@@ -904,7 +904,7 @@ vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
@@ -913,7 +913,7 @@ vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
@@ -922,7 +922,7 @@ vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
@@ -931,7 +931,7 @@ vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
@@ -940,7 +940,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
@@ -949,7 +949,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
@@ -958,7 +958,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
@@ -967,7 +967,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
@@ -976,7 +976,7 @@ vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
@@ -985,7 +985,7 @@ vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
@@ -994,7 +994,7 @@ vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
@@ -1003,7 +1003,7 @@ vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
@@ -1012,7 +1012,7 @@ vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
@@ -1021,7 +1021,7 @@ vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
@@ -1030,7 +1030,7 @@ vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
@@ -1039,7 +1039,7 @@ vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
@@ -1048,7 +1048,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
@@ -1057,7 +1057,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
@@ -1066,7 +1066,7 @@ vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
@@ -1075,7 +1075,7 @@ vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
@@ -1084,7 +1084,7 @@ vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
@@ -1093,7 +1093,7 @@ vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
@@ -1102,7 +1102,7 @@ vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
@@ -1111,7 +1111,7 @@ vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
@@ -1120,7 +1120,7 @@ vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
@@ -1129,7 +1129,7 @@ vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
@@ -1138,7 +1138,7 @@ vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
@@ -1147,7 +1147,7 @@ vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
@@ -1156,7 +1156,7 @@ vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
@@ -1165,7 +1165,7 @@ vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
@@ -1174,7 +1174,7 @@ vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
@@ -1183,7 +1183,7 @@ vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
@@ -1192,7 +1192,7 @@ vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
@@ -1201,7 +1201,7 @@ vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
@@ -1210,7 +1210,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
@@ -1219,7 +1219,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
@@ -1228,7 +1228,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
@@ -1246,7 +1246,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
@@ -1255,7 +1255,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
@@ -1264,7 +1264,7 @@ vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
@@ -1273,7 +1273,7 @@ vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
@@ -1282,7 +1282,7 @@ vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
@@ -1300,7 +1300,7 @@ vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
@@ -1309,7 +1309,7 @@ vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
@@ -1318,7 +1318,7 @@ vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vu
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
@@ -1327,7 +1327,7 @@ vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
@@ -1336,7 +1336,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
@@ -1345,7 +1345,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
@@ -1354,7 +1354,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
@@ -1363,7 +1363,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
@@ -1372,7 +1372,7 @@ vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
@@ -1381,7 +1381,7 @@ vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
@@ -1390,7 +1390,7 @@ vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
@@ -1399,7 +1399,7 @@ vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
@@ -1408,7 +1408,7 @@ vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
@@ -1417,7 +1417,7 @@ vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
@@ -1426,7 +1426,7 @@ vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
@@ -1435,7 +1435,7 @@ vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
@@ -1444,7 +1444,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
@@ -1453,7 +1453,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
@@ -1462,7 +1462,7 @@ vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
@@ -1471,7 +1471,7 @@ vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
@@ -1480,7 +1480,7 @@ vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
@@ -1489,7 +1489,7 @@ vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
@@ -1498,7 +1498,7 @@ vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
@@ -1525,7 +1525,7 @@ vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
@@ -1534,7 +1534,7 @@ vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
@@ -1543,7 +1543,7 @@ vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
@@ -1552,7 +1552,7 @@ vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
@@ -1561,7 +1561,7 @@ vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
@@ -1570,7 +1570,7 @@ vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
@@ -1579,7 +1579,7 @@ vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
@@ -1588,6 +1588,6 @@ vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc(mask, vd, rs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadc.c
index 6e87dfd9f9bd..9b61d75bc126 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadc.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf8_b64(
@@ -21,7 +21,7 @@ vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t car
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i8mf8_b64(
@@ -30,7 +30,7 @@ vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t carryin
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i8mf8_b64(
@@ -39,7 +39,7 @@ vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf4_b32(
@@ -48,7 +48,7 @@ vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf4_b32(
@@ -57,7 +57,7 @@ vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t car
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i8mf4_b32(
@@ -66,7 +66,7 @@ vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t carryin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i8mf4_b32(
@@ -75,7 +75,7 @@ vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf2_b16(
@@ -84,7 +84,7 @@ vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf2_b16(
@@ -93,7 +93,7 @@ vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t car
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i8mf2_b16(
@@ -102,7 +102,7 @@ vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t carryin
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i8mf2_b16(
@@ -111,7 +111,7 @@ vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m1_b8(
@@ -120,7 +120,7 @@ vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m1_b8(
@@ -129,7 +129,7 @@ vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i8m1_b8(
@@ -138,7 +138,7 @@ vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i8m1_b8(
@@ -147,7 +147,7 @@ vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m2_b4(
@@ -156,7 +156,7 @@ vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m2_b4(
@@ -165,7 +165,7 @@ vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i8m2_b4(
@@ -174,7 +174,7 @@ vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i8m2_b4(
@@ -183,7 +183,7 @@ vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m4_b2(
@@ -192,7 +192,7 @@ vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m4_b2(
@@ -201,7 +201,7 @@ vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i8m4_b2(
@@ -210,7 +210,7 @@ vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i8m4_b2(
@@ -219,7 +219,7 @@ vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m8_b1(
@@ -228,7 +228,7 @@ vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m8_b1(
@@ -237,7 +237,7 @@ vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i8m8_b1(
@@ -246,7 +246,7 @@ vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i8m8_b1(
@@ -255,7 +255,7 @@ vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf4_b64(
@@ -264,7 +264,7 @@ vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf4_b64(
@@ -273,7 +273,7 @@ vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i16mf4_b64(
@@ -282,7 +282,7 @@ vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t carr
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i16mf4_b64(
@@ -291,7 +291,7 @@ vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf2_b32(
@@ -300,7 +300,7 @@ vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf2_b32(
@@ -309,7 +309,7 @@ vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i16mf2_b32(
@@ -318,7 +318,7 @@ vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t carr
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i16mf2_b32(
@@ -327,7 +327,7 @@ vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m1_b16(
@@ -336,7 +336,7 @@ vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m1_b16(
@@ -345,7 +345,7 @@ vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t car
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i16m1_b16(
@@ -354,7 +354,7 @@ vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t carryi
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i16m1_b16(
@@ -363,7 +363,7 @@ vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m2_b8(
@@ -372,7 +372,7 @@ vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m2_b8(
@@ -381,7 +381,7 @@ vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t carryi
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i16m2_b8(
@@ -390,7 +390,7 @@ vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i16m2_b8(
@@ -399,7 +399,7 @@ vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m4_b4(
@@ -408,7 +408,7 @@ vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m4_b4(
@@ -417,7 +417,7 @@ vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryi
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i16m4_b4(
@@ -426,7 +426,7 @@ vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i16m4_b4(
@@ -435,7 +435,7 @@ vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m8_b2(
@@ -444,7 +444,7 @@ vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m8_b2(
@@ -453,7 +453,7 @@ vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t carryi
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i16m8_b2(
@@ -462,7 +462,7 @@ vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i16m8_b2(
@@ -471,7 +471,7 @@ vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i32mf2_b64(
@@ -480,7 +480,7 @@ vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i32mf2_b64(
@@ -489,7 +489,7 @@ vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i32mf2_b64(
@@ -498,7 +498,7 @@ vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t carr
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i32mf2_b64(
@@ -507,7 +507,7 @@ vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m1_b32(
@@ -516,7 +516,7 @@ vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m1_b32(
@@ -525,7 +525,7 @@ vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t car
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i32m1_b32(
@@ -534,7 +534,7 @@ vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t carryi
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i32m1_b32(
@@ -543,7 +543,7 @@ vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m2_b16(
@@ -552,7 +552,7 @@ vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m2_b16(
@@ -561,7 +561,7 @@ vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t car
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i32m2_b16(
@@ -570,7 +570,7 @@ vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t carryi
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i32m2_b16(
@@ -579,7 +579,7 @@ vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m4_b8(
@@ -588,7 +588,7 @@ vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m4_b8(
@@ -597,7 +597,7 @@ vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t carryi
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i32m4_b8(
@@ -606,7 +606,7 @@ vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i32m4_b8(
@@ -615,7 +615,7 @@ vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m8_b4(
@@ -624,7 +624,7 @@ vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m8_b4(
@@ -633,7 +633,7 @@ vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t carryi
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i32m8_b4(
@@ -642,7 +642,7 @@ vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i32m8_b4(
@@ -651,7 +651,7 @@ vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m1_b64(
@@ -660,7 +660,7 @@ vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m1_b64(
@@ -669,7 +669,7 @@ vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t car
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i64m1_b64(
@@ -678,7 +678,7 @@ vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t carryi
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i64m1_b64(
@@ -687,7 +687,7 @@ vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m2_b32(
@@ -696,7 +696,7 @@ vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m2_b32(
@@ -705,7 +705,7 @@ vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t car
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i64m2_b32(
@@ -714,7 +714,7 @@ vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t carryi
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i64m2_b32(
@@ -723,7 +723,7 @@ vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m4_b16(
@@ -732,7 +732,7 @@ vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m4_b16(
@@ -741,7 +741,7 @@ vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t car
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i64m4_b16(
@@ -750,7 +750,7 @@ vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t carryi
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i64m4_b16(
@@ -759,7 +759,7 @@ vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m8_b8(
@@ -768,7 +768,7 @@ vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m8_b8(
@@ -777,7 +777,7 @@ vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryi
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_i64m8_b8(
@@ -786,7 +786,7 @@ vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_i64m8_b8(
@@ -795,7 +795,7 @@ vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf8_b64(
@@ -804,7 +804,7 @@ vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf8_b64(
@@ -813,7 +813,7 @@ vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t c
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u8mf8_b64(
@@ -822,7 +822,7 @@ vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t carry
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u8mf8_b64(
@@ -831,7 +831,7 @@ vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf4_b32(
@@ -840,7 +840,7 @@ vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf4_b32(
@@ -849,7 +849,7 @@ vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t c
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u8mf4_b32(
@@ -858,7 +858,7 @@ vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t carry
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u8mf4_b32(
@@ -867,7 +867,7 @@ vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf2_b16(
@@ -876,7 +876,7 @@ vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf2_b16(
@@ -885,7 +885,7 @@ vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t c
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u8mf2_b16(
@@ -894,7 +894,7 @@ vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t carry
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u8mf2_b16(
@@ -903,7 +903,7 @@ vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m1_b8(
@@ -912,7 +912,7 @@ vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m1_b8(
@@ -921,7 +921,7 @@ vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u8m1_b8(
@@ -930,7 +930,7 @@ vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u8m1_b8(
@@ -939,7 +939,7 @@ vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m2_b4(
@@ -948,7 +948,7 @@ vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m2_b4(
@@ -957,7 +957,7 @@ vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u8m2_b4(
@@ -966,7 +966,7 @@ vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u8m2_b4(
@@ -975,7 +975,7 @@ vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m4_b2(
@@ -984,7 +984,7 @@ vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m4_b2(
@@ -993,7 +993,7 @@ vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u8m4_b2(
@@ -1002,7 +1002,7 @@ vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u8m4_b2(
@@ -1011,7 +1011,7 @@ vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m8_b1(
@@ -1020,7 +1020,7 @@ vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m8_b1(
@@ -1029,7 +1029,7 @@ vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u8m8_b1(
@@ -1038,7 +1038,7 @@ vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u8m8_b1(
@@ -1047,7 +1047,7 @@ vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf4_b64(
@@ -1056,7 +1056,7 @@ vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf4_b64(
@@ -1065,7 +1065,7 @@ vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u16mf4_b64(
@@ -1074,7 +1074,7 @@ vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t ca
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u16mf4_b64(
@@ -1083,7 +1083,7 @@ vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf2_b32(
@@ -1092,7 +1092,7 @@ vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf2_b32(
@@ -1101,7 +1101,7 @@ vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u16mf2_b32(
@@ -1110,7 +1110,7 @@ vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t ca
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u16mf2_b32(
@@ -1119,7 +1119,7 @@ vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m1_b16(
@@ -1128,7 +1128,7 @@ vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m1_b16(
@@ -1137,7 +1137,7 @@ vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t c
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u16m1_b16(
@@ -1146,7 +1146,7 @@ vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t carr
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u16m1_b16(
@@ -1155,7 +1155,7 @@ vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m2_b8(
@@ -1164,7 +1164,7 @@ vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m2_b8(
@@ -1173,7 +1173,7 @@ vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carr
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u16m2_b8(
@@ -1182,7 +1182,7 @@ vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t carryin
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u16m2_b8(
@@ -1191,7 +1191,7 @@ vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m4_b4(
@@ -1200,7 +1200,7 @@ vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m4_b4(
@@ -1209,7 +1209,7 @@ vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carr
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u16m4_b4(
@@ -1218,7 +1218,7 @@ vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u16m4_b4(
@@ -1227,7 +1227,7 @@ vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m8_b2(
@@ -1236,7 +1236,7 @@ vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m8_b2(
@@ -1245,7 +1245,7 @@ vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carr
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u16m8_b2(
@@ -1254,7 +1254,7 @@ vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t carryin
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u16m8_b2(
@@ -1263,7 +1263,7 @@ vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u32mf2_b64(
@@ -1272,7 +1272,7 @@ vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u32mf2_b64(
@@ -1281,7 +1281,7 @@ vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u32mf2_b64(
@@ -1290,7 +1290,7 @@ vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t ca
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u32mf2_b64(
@@ -1299,7 +1299,7 @@ vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m1_b32(
@@ -1308,7 +1308,7 @@ vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m1_b32(
@@ -1317,7 +1317,7 @@ vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t c
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u32m1_b32(
@@ -1326,7 +1326,7 @@ vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t carr
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u32m1_b32(
@@ -1335,7 +1335,7 @@ vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m2_b16(
@@ -1344,7 +1344,7 @@ vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m2_b16(
@@ -1353,7 +1353,7 @@ vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t c
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u32m2_b16(
@@ -1362,7 +1362,7 @@ vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t carr
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u32m2_b16(
@@ -1371,7 +1371,7 @@ vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m4_b8(
@@ -1380,7 +1380,7 @@ vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m4_b8(
@@ -1389,7 +1389,7 @@ vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carr
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u32m4_b8(
@@ -1398,7 +1398,7 @@ vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t carryin
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u32m4_b8(
@@ -1407,7 +1407,7 @@ vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m8_b4(
@@ -1416,7 +1416,7 @@ vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m8_b4(
@@ -1425,7 +1425,7 @@ vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carr
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u32m8_b4(
@@ -1434,7 +1434,7 @@ vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t carryin
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u32m8_b4(
@@ -1443,7 +1443,7 @@ vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m1_b64(
@@ -1452,7 +1452,7 @@ vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m1_b64(
@@ -1461,7 +1461,7 @@ vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t c
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u64m1_b64(
@@ -1470,7 +1470,7 @@ vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t carr
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u64m1_b64(
@@ -1479,7 +1479,7 @@ vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m2_b32(
@@ -1488,7 +1488,7 @@ vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m2_b32(
@@ -1497,7 +1497,7 @@ vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t c
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u64m2_b32(
@@ -1506,7 +1506,7 @@ vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t carr
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u64m2_b32(
@@ -1515,7 +1515,7 @@ vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m4_b16(
@@ -1524,7 +1524,7 @@ vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m4_b16(
@@ -1533,7 +1533,7 @@ vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t c
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u64m4_b16(
@@ -1542,7 +1542,7 @@ vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t carr
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u64m4_b16(
@@ -1551,7 +1551,7 @@ vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m8_b8(
@@ -1560,7 +1560,7 @@ vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m8_b8(
@@ -1569,7 +1569,7 @@ vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carr
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) {
- return vmadc(op1, op2, carryin, vl);
+ return __riscv_vmadc(op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vv_u64m8_b8(
@@ -1578,7 +1578,7 @@ vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmadc_vx_u64m8_b8(
@@ -1587,6 +1587,6 @@ vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmadc(op1, op2, vl);
+ return __riscv_vmadc(op1, op2, vl);
}
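
The rename in these hunks is purely mechanical: every overloaded call keeps its
argument list and only gains the __riscv_ prefix, and the overloaded intrinsic
still resolves to the vv/vx/vvm form from the argument types. Below is a minimal
sketch of user code against the renamed API; it is illustrative only — the
function name saxpy_step and the compile line are assumptions, not part of this
patch — and uses the same __riscv_vmadd(vd, rs1, vs2, vl) shape exercised by the
vmadd.c tests that follow.

    /* Illustrative sketch, not from the patch. Compile with a
     * vector-enabled toolchain, e.g.:
     *   clang -march=rv64gcv -O2 -c example.c
     */
    #include <riscv_vector.h>
    #include <stddef.h>

    /* vmadd.vx semantics: vd[i] = (rs1 * vd[i]) + vs2[i].
     * The overloaded __riscv_vmadd picks the vx form here because
     * rs1 is a scalar; passing a vector for rs1 selects vv instead. */
    vint32m1_t saxpy_step(vint32m1_t vd, int32_t rs1, vint32m1_t vs2,
                          size_t vl) {
      return __riscv_vmadd(vd, rs1, vs2, vl);
    }

The unprefixed spelling vmadd(...) on the removed lines is exactly what the
hunks below retire.
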
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c
index a5ac78884f28..601ce4ff044c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8(
@@ -22,7 +22,7 @@ vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4(
@@ -31,7 +31,7 @@ vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4(
@@ -40,7 +40,7 @@ vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2(
@@ -49,7 +49,7 @@ vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2(
@@ -58,7 +58,7 @@ vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1(
@@ -67,7 +67,7 @@ vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1(
@@ -76,7 +76,7 @@ vint8m1_t test_vmadd_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2(
@@ -85,7 +85,7 @@ vint8m1_t test_vmadd_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2(
@@ -94,7 +94,7 @@ vint8m2_t test_vmadd_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4(
@@ -103,7 +103,7 @@ vint8m2_t test_vmadd_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4(
@@ -112,7 +112,7 @@ vint8m4_t test_vmadd_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8(
@@ -121,7 +121,7 @@ vint8m4_t test_vmadd_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8(
@@ -130,7 +130,7 @@ vint8m8_t test_vmadd_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4(
@@ -139,7 +139,7 @@ vint8m8_t test_vmadd_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4(
@@ -148,7 +148,7 @@ vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2(
@@ -157,7 +157,7 @@ vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2(
@@ -166,7 +166,7 @@ vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1(
@@ -175,7 +175,7 @@ vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1(
@@ -184,7 +184,7 @@ vint16m1_t test_vmadd_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2(
@@ -193,7 +193,7 @@ vint16m1_t test_vmadd_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2(
@@ -202,7 +202,7 @@ vint16m2_t test_vmadd_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4(
@@ -211,7 +211,7 @@ vint16m2_t test_vmadd_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4(
@@ -220,7 +220,7 @@ vint16m4_t test_vmadd_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8(
@@ -229,7 +229,7 @@ vint16m4_t test_vmadd_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8(
@@ -238,7 +238,7 @@ vint16m8_t test_vmadd_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2(
@@ -247,7 +247,7 @@ vint16m8_t test_vmadd_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2(
@@ -256,7 +256,7 @@ vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1(
@@ -265,7 +265,7 @@ vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1(
@@ -274,7 +274,7 @@ vint32m1_t test_vmadd_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vmadd_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2(
@@ -292,7 +292,7 @@ vint32m2_t test_vmadd_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4(
@@ -301,7 +301,7 @@ vint32m2_t test_vmadd_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4(
@@ -310,7 +310,7 @@ vint32m4_t test_vmadd_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8(
@@ -319,7 +319,7 @@ vint32m4_t test_vmadd_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8(
@@ -328,7 +328,7 @@ vint32m8_t test_vmadd_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1(
@@ -337,7 +337,7 @@ vint32m8_t test_vmadd_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1(
@@ -346,7 +346,7 @@ vint64m1_t test_vmadd_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2(
@@ -355,7 +355,7 @@ vint64m1_t test_vmadd_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2(
@@ -364,7 +364,7 @@ vint64m2_t test_vmadd_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4(
@@ -373,7 +373,7 @@ vint64m2_t test_vmadd_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4(
@@ -382,7 +382,7 @@ vint64m4_t test_vmadd_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8(
@@ -391,7 +391,7 @@ vint64m4_t test_vmadd_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8(
@@ -400,7 +400,7 @@ vint64m8_t test_vmadd_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8(
@@ -409,7 +409,7 @@ vint64m8_t test_vmadd_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1(
@@ -472,7 +472,7 @@ vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2(
@@ -481,7 +481,7 @@ vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2(
@@ -490,7 +490,7 @@ vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4(
@@ -499,7 +499,7 @@ vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4(
@@ -508,7 +508,7 @@ vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8(
@@ -517,7 +517,7 @@ vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8(
@@ -526,7 +526,7 @@ vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4(
@@ -535,7 +535,7 @@ vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1(
@@ -580,7 +580,7 @@ vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2(
@@ -589,7 +589,7 @@ vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2(
@@ -598,7 +598,7 @@ vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4(
@@ -607,7 +607,7 @@ vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4(
@@ -616,7 +616,7 @@ vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8(
@@ -625,7 +625,7 @@ vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8(
@@ -634,7 +634,7 @@ vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2(
@@ -643,7 +643,7 @@ vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1(
@@ -670,7 +670,7 @@ vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2(
@@ -679,7 +679,7 @@ vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2(
@@ -688,7 +688,7 @@ vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4(
@@ -697,7 +697,7 @@ vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4(
@@ -706,7 +706,7 @@ vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8(
@@ -715,7 +715,7 @@ vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8(
@@ -724,7 +724,7 @@ vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1(
@@ -733,7 +733,7 @@ vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1(
@@ -742,7 +742,7 @@ vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2(
@@ -751,7 +751,7 @@ vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2(
@@ -760,7 +760,7 @@ vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4(
@@ -769,7 +769,7 @@ vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4(
@@ -778,7 +778,7 @@ vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8(
@@ -787,7 +787,7 @@ vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmadd(vd, vs1, vs2, vl);
+ return __riscv_vmadd(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8(
@@ -796,7 +796,7 @@ vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmadd(vd, rs1, vs2, vl);
+ return __riscv_vmadd(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m(
@@ -805,7 +805,7 @@ vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m(
@@ -814,7 +814,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m(
@@ -823,7 +823,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m(
@@ -832,7 +832,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m(
@@ -841,7 +841,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m(
@@ -850,7 +850,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m(
@@ -859,7 +859,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m(
@@ -868,7 +868,7 @@ vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m(
@@ -877,7 +877,7 @@ vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m(
@@ -886,7 +886,7 @@ vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m(
@@ -895,7 +895,7 @@ vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m(
@@ -904,7 +904,7 @@ vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m(
@@ -913,7 +913,7 @@ vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m(
@@ -922,7 +922,7 @@ vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m(
@@ -931,7 +931,7 @@ vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m(
@@ -940,7 +940,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m(
@@ -949,7 +949,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m(
@@ -958,7 +958,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m(
@@ -967,7 +967,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m(
@@ -976,7 +976,7 @@ vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m(
@@ -985,7 +985,7 @@ vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m(
@@ -994,7 +994,7 @@ vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m(
@@ -1003,7 +1003,7 @@ vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m(
@@ -1012,7 +1012,7 @@ vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m(
@@ -1021,7 +1021,7 @@ vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m(
@@ -1030,7 +1030,7 @@ vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m(
@@ -1039,7 +1039,7 @@ vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m(
@@ -1048,7 +1048,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m(
@@ -1057,7 +1057,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m(
@@ -1066,7 +1066,7 @@ vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m(
@@ -1075,7 +1075,7 @@ vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m(
@@ -1084,7 +1084,7 @@ vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m(
@@ -1093,7 +1093,7 @@ vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m(
@@ -1102,7 +1102,7 @@ vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m(
@@ -1111,7 +1111,7 @@ vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m(
@@ -1120,7 +1120,7 @@ vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m(
@@ -1129,7 +1129,7 @@ vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m(
@@ -1138,7 +1138,7 @@ vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m(
@@ -1147,7 +1147,7 @@ vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m(
@@ -1156,7 +1156,7 @@ vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m(
@@ -1165,7 +1165,7 @@ vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m(
@@ -1174,7 +1174,7 @@ vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m(
@@ -1183,7 +1183,7 @@ vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m(
@@ -1192,7 +1192,7 @@ vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m(
@@ -1201,7 +1201,7 @@ vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m(
@@ -1210,7 +1210,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m(
@@ -1219,7 +1219,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m(
@@ -1228,7 +1228,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m(
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m(
@@ -1246,7 +1246,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m(
@@ -1255,7 +1255,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m(
@@ -1264,7 +1264,7 @@ vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m(
@@ -1273,7 +1273,7 @@ vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m(
@@ -1282,7 +1282,7 @@ vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m(
@@ -1300,7 +1300,7 @@ vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m(
@@ -1309,7 +1309,7 @@ vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m(
@@ -1318,7 +1318,7 @@ vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vu
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m(
@@ -1327,7 +1327,7 @@ vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m(
@@ -1336,7 +1336,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m(
@@ -1345,7 +1345,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m(
@@ -1354,7 +1354,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m(
@@ -1363,7 +1363,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m(
@@ -1372,7 +1372,7 @@ vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m(
@@ -1381,7 +1381,7 @@ vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m(
@@ -1390,7 +1390,7 @@ vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m(
@@ -1399,7 +1399,7 @@ vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m(
@@ -1408,7 +1408,7 @@ vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m(
@@ -1417,7 +1417,7 @@ vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m(
@@ -1426,7 +1426,7 @@ vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m(
@@ -1435,7 +1435,7 @@ vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m(
@@ -1444,7 +1444,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m(
@@ -1453,7 +1453,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m(
@@ -1462,7 +1462,7 @@ vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m(
@@ -1471,7 +1471,7 @@ vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m(
@@ -1480,7 +1480,7 @@ vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m(
@@ -1489,7 +1489,7 @@ vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m(
@@ -1498,7 +1498,7 @@ vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m(
@@ -1525,7 +1525,7 @@ vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m(
@@ -1534,7 +1534,7 @@ vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m(
@@ -1543,7 +1543,7 @@ vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
@@ -1552,7 +1552,7 @@ vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
@@ -1561,7 +1561,7 @@ vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
@@ -1570,7 +1570,7 @@ vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
@@ -1579,7 +1579,7 @@ vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmadd(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
@@ -1588,6 +1588,6 @@ vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmadd(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd(mask, vd, rs1, vs2, vl);
}
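The hunks above finish the masked `vmadd` cases: every overloaded call gains the `__riscv_` prefix while the argument list is untouched. As a caller-side illustration only (not part of the patch; the helper name `madd_masked` is invented here, and a toolchain carrying this patch set is assumed), the migration looks like:

#include <riscv_vector.h>

// Masked overloaded vmadd: for active lanes, vd[i] = vs1[i] * vd[i] + vs2[i].
vint32m1_t madd_masked(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1,
                       vint32m1_t vs2, size_t vl) {
  return __riscv_vmadd(mask, vd, vs1, vs2, vl); // previously: vmadd(mask, vd, vs1, vs2, vl)
}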
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmand.c
index 7014995724b5..a147e7570c68 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmand.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmand(op1, op2, vl);
+ return __riscv_vmand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmand_mm_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmand(op1, op2, vl);
+ return __riscv_vmand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmand_mm_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmand(op1, op2, vl);
+ return __riscv_vmand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmand_mm_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmand(op1, op2, vl);
+ return __riscv_vmand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmand_mm_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmand(op1, op2, vl);
+ return __riscv_vmand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmand_mm_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmand(op1, op2, vl);
+ return __riscv_vmand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmand_mm_b64(
@@ -66,6 +66,6 @@ vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmand(op1, op2, vl);
+ return __riscv_vmand(op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmandn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmandn.c
index b50cbddc6d6f..14cdb46a4392 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmandn.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmandn.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmandn(op1, op2, vl);
+ return __riscv_vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandn_mm_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmandn(op1, op2, vl);
+ return __riscv_vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandn_mm_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmandn(op1, op2, vl);
+ return __riscv_vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandn_mm_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmandn(op1, op2, vl);
+ return __riscv_vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandn_mm_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmandn(op1, op2, vl);
+ return __riscv_vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandn_mm_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmandn(op1, op2, vl);
+ return __riscv_vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandn_mm_b64(
@@ -66,6 +66,6 @@ vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmandn(op1, op2, vl);
+ return __riscv_vmandn(op1, op2, vl);
}
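The mask-register logical intrinsics renamed above (`vmand`, `vmandn`) keep their two-operand `(op1, op2, vl)` shape. A sketch of the renamed overloads in use, under the same assumptions (the helper `refine_mask` is invented for illustration; the operand order mirrors the tests above):

#include <riscv_vector.h>

// Keep lanes active in m1 but not in m2, then intersect with m3.
vbool32_t refine_mask(vbool32_t m1, vbool32_t m2, vbool32_t m3, size_t vl) {
  vbool32_t only_m1 = __riscv_vmandn(m1, m2, vl); // m1 & ~m2; previously: vmandn(m1, m2, vl)
  return __riscv_vmand(only_m1, m3, vl);          // previously: vmand(only_m1, m3, vl)
}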
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmax.c
index 44dc82a9f8fb..643d09e59a45 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmax.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmax(op1, op2, vl);
+ return __riscv_vmax(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmax(mask, op1, op2, vl);
+ return __riscv_vmax(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmaxu.c
index d6dc6b0e95ee..9a0becafa03c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmaxu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmaxu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmaxu(op1, op2, vl);
+ return __riscv_vmaxu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmaxu(mask, op1, op2, vl);
+ return __riscv_vmaxu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
index f5f4c75568b7..b659a92f471b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
@@ -22,7 +22,7 @@ vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
@@ -31,7 +31,7 @@ vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
@@ -40,7 +40,7 @@ vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
@@ -49,7 +49,7 @@ vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
@@ -58,7 +58,7 @@ vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
@@ -67,7 +67,7 @@ vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
@@ -76,7 +76,7 @@ vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
@@ -85,7 +85,7 @@ vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
@@ -94,7 +94,7 @@ vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
@@ -103,7 +103,7 @@ vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
@@ -112,7 +112,7 @@ vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
@@ -121,7 +121,7 @@ vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
@@ -130,7 +130,7 @@ vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
@@ -139,7 +139,7 @@ vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
@@ -148,7 +148,7 @@ vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
@@ -157,7 +157,7 @@ vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
@@ -166,7 +166,7 @@ vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
@@ -175,7 +175,7 @@ vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
@@ -184,7 +184,7 @@ vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t mask,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
@@ -193,7 +193,7 @@ vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
@@ -202,7 +202,7 @@ vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t mask,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
@@ -211,7 +211,7 @@ vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
@@ -220,7 +220,7 @@ vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
@@ -229,7 +229,7 @@ vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
@@ -238,7 +238,7 @@ vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
@@ -247,7 +247,7 @@ vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t mask, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
@@ -256,7 +256,7 @@ vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
@@ -265,7 +265,7 @@ vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
@@ -274,7 +274,7 @@ vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
@@ -292,7 +292,7 @@ vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
@@ -301,7 +301,7 @@ vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
@@ -310,7 +310,7 @@ vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t mask,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
@@ -319,7 +319,7 @@ vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
@@ -328,7 +328,7 @@ vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
@@ -337,7 +337,7 @@ vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
@@ -346,7 +346,7 @@ vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
@@ -355,7 +355,7 @@ vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
@@ -364,7 +364,7 @@ vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
@@ -373,7 +373,7 @@ vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t mask, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
@@ -382,7 +382,7 @@ vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
@@ -391,7 +391,7 @@ vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
@@ -400,7 +400,7 @@ vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
@@ -409,7 +409,7 @@ vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t ma
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t ma
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
@@ -472,7 +472,7 @@ vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
@@ -481,7 +481,7 @@ vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
@@ -490,7 +490,7 @@ vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
@@ -499,7 +499,7 @@ vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
@@ -508,7 +508,7 @@ vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
@@ -517,7 +517,7 @@ vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
@@ -526,7 +526,7 @@ vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
@@ -535,7 +535,7 @@ vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t ma
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
@@ -580,7 +580,7 @@ vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t ma
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
@@ -589,7 +589,7 @@ vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
@@ -598,7 +598,7 @@ vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mas
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
@@ -607,7 +607,7 @@ vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
@@ -616,7 +616,7 @@ vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mas
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
@@ -625,7 +625,7 @@ vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
@@ -634,7 +634,7 @@ vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mas
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
@@ -643,7 +643,7 @@ vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
@@ -670,7 +670,7 @@ vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
@@ -679,7 +679,7 @@ vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
@@ -688,7 +688,7 @@ vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t ma
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
@@ -697,7 +697,7 @@ vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
@@ -706,7 +706,7 @@ vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mas
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
@@ -715,7 +715,7 @@ vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
@@ -724,7 +724,7 @@ vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mas
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
@@ -733,7 +733,7 @@ vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
@@ -742,7 +742,7 @@ vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t ma
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
@@ -751,7 +751,7 @@ vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
@@ -760,7 +760,7 @@ vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
@@ -769,7 +769,7 @@ vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
@@ -778,7 +778,7 @@ vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t ma
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
@@ -787,7 +787,7 @@ vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
@@ -796,7 +796,7 @@ vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mas
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4(
@@ -805,7 +805,7 @@ vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2(
@@ -814,7 +814,7 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1(
@@ -823,7 +823,7 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2(
@@ -832,7 +832,7 @@ vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4(
@@ -841,7 +841,7 @@ vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8(
@@ -850,7 +850,7 @@ vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
@@ -859,7 +859,7 @@ vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
@@ -868,7 +868,7 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
@@ -877,7 +877,7 @@ vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
@@ -886,7 +886,7 @@ vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
@@ -895,7 +895,7 @@ vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
@@ -904,7 +904,7 @@ vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
@@ -913,7 +913,7 @@ vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
@@ -922,7 +922,7 @@ vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
@@ -931,6 +931,6 @@ vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
- return vmerge(op1, op2, mask, vl);
+ return __riscv_vmerge(op1, op2, mask, vl);
}
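For illustration only, not part of the autogenerated tests: a minimal caller sketch showing what this rename means for user code, assuming a toolchain that ships <riscv_vector.h> with the V extension enabled. The function name select_masked is hypothetical; the intrinsic signature matches the tests above.

#include <riscv_vector.h>

// Overloaded merge: result[i] = mask[i] ? op2[i] : op1[i], for vl elements.
// Only the __riscv_ prefix is new; argument order and types are unchanged.
vuint32m1_t select_masked(vuint32m1_t op1, vuint32m1_t op2,
                          vbool32_t mask, size_t vl) {
  return __riscv_vmerge(op1, op2, mask, vl); // was: vmerge(op1, op2, mask, vl)
}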
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq.c
index a353e4bed75b..d0b2b57a2903 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmfeq_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmfeq_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmfeq_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmfeq_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmfeq_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmfeq_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmfeq_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmfeq_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmfeq_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmfeq_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfeq_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmfeq_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfeq_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64(
@@ -121,7 +121,7 @@ vbool2_t test_vmfeq_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64(
@@ -130,7 +130,7 @@ vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32(
@@ -139,7 +139,7 @@ vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32(
@@ -148,7 +148,7 @@ vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16(
@@ -157,7 +157,7 @@ vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16(
@@ -166,7 +166,7 @@ vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8(
@@ -175,7 +175,7 @@ vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8(
@@ -184,7 +184,7 @@ vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4(
@@ -193,7 +193,7 @@ vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4(
@@ -202,7 +202,7 @@ vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64(
@@ -211,7 +211,7 @@ vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64(
@@ -220,7 +220,7 @@ vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32(
@@ -229,7 +229,7 @@ vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32(
@@ -238,7 +238,7 @@ vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16(
@@ -247,7 +247,7 @@ vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16(
@@ -256,7 +256,7 @@ vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8(
@@ -265,7 +265,7 @@ vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8(
@@ -274,7 +274,7 @@ vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
- return vmfeq(op1, op2, vl);
+ return __riscv_vmfeq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64_m(
@@ -283,7 +283,7 @@ vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64_m(
@@ -292,7 +292,7 @@ vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_m(
@@ -301,7 +301,7 @@ vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_m(
@@ -310,7 +310,7 @@ vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_m(
@@ -319,7 +319,7 @@ vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_m(
@@ -328,7 +328,7 @@ vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_m(
@@ -337,7 +337,7 @@ vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_m(
@@ -346,7 +346,7 @@ vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4_m(
@@ -355,7 +355,7 @@ vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_m(
@@ -364,7 +364,7 @@ vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_m(
@@ -373,7 +373,7 @@ vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_m(
@@ -382,7 +382,7 @@ vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m(
@@ -391,7 +391,7 @@ vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m(
@@ -400,7 +400,7 @@ vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m(
@@ -409,7 +409,7 @@ vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m(
@@ -418,7 +418,7 @@ vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m(
@@ -427,7 +427,7 @@ vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m(
@@ -436,7 +436,7 @@ vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m(
@@ -445,7 +445,7 @@ vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m(
@@ -454,7 +454,7 @@ vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m(
@@ -463,7 +463,7 @@ vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m(
@@ -472,7 +472,7 @@ vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m(
@@ -481,7 +481,7 @@ vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m(
@@ -490,7 +490,7 @@ vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m(
@@ -499,7 +499,7 @@ vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m(
@@ -508,7 +508,7 @@ vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m(
@@ -517,7 +517,7 @@ vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m(
@@ -526,7 +526,7 @@ vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m(
@@ -535,7 +535,7 @@ vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m(
@@ -544,6 +544,6 @@ vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfeq(mask, op1, op2, vl);
+ return __riscv_vmfeq(mask, op1, op2, vl);
}
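An illustrative sketch, again not part of the patch: the mask-producing compares keep their overloaded shape under the new prefix, and the masked overload takes the mask as its first argument, exactly as in the tests above. The name equal_then_refine is hypothetical.

#include <riscv_vector.h>

// Compare two float vectors, then reuse the result as the mask operand.
vbool32_t equal_then_refine(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  vbool32_t eq = __riscv_vmfeq(a, b, vl); // unmasked vv compare
  return __riscv_vmfeq(eq, a, b, vl);     // masked overload: mask comes first
}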
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge.c
index f014c1fcaffe..62650c4b163b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmfge_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64(
@@ -121,7 +121,7 @@ vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64(
@@ -130,7 +130,7 @@ vbool64_t test_vmfge_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32(
@@ -139,7 +139,7 @@ vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32(
@@ -148,7 +148,7 @@ vbool32_t test_vmfge_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16(
@@ -157,7 +157,7 @@ vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16(
@@ -166,7 +166,7 @@ vbool16_t test_vmfge_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8(
@@ -175,7 +175,7 @@ vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8(
@@ -184,7 +184,7 @@ vbool8_t test_vmfge_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4(
@@ -193,7 +193,7 @@ vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4(
@@ -202,7 +202,7 @@ vbool4_t test_vmfge_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64(
@@ -211,7 +211,7 @@ vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64(
@@ -220,7 +220,7 @@ vbool64_t test_vmfge_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32(
@@ -229,7 +229,7 @@ vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32(
@@ -238,7 +238,7 @@ vbool32_t test_vmfge_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16(
@@ -247,7 +247,7 @@ vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16(
@@ -256,7 +256,7 @@ vbool16_t test_vmfge_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8(
@@ -265,7 +265,7 @@ vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8(
@@ -274,7 +274,7 @@ vbool8_t test_vmfge_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
- return vmfge(op1, op2, vl);
+ return __riscv_vmfge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64_m(
@@ -283,7 +283,7 @@ vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_m(
@@ -292,7 +292,7 @@ vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_m(
@@ -301,7 +301,7 @@ vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_m(
@@ -310,7 +310,7 @@ vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16_m(
@@ -319,7 +319,7 @@ vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_m(
@@ -328,7 +328,7 @@ vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_m(
@@ -337,7 +337,7 @@ vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_m(
@@ -346,7 +346,7 @@ vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_m(
@@ -355,7 +355,7 @@ vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4_m(
@@ -364,7 +364,7 @@ vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_m(
@@ -373,7 +373,7 @@ vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_m(
@@ -382,7 +382,7 @@ vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_m(
@@ -391,7 +391,7 @@ vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m(
@@ -400,7 +400,7 @@ vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32_m(
@@ -409,7 +409,7 @@ vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m(
@@ -418,7 +418,7 @@ vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16_m(
@@ -427,7 +427,7 @@ vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m(
@@ -436,7 +436,7 @@ vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8_m(
@@ -445,7 +445,7 @@ vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m(
@@ -454,7 +454,7 @@ vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4_m(
@@ -463,7 +463,7 @@ vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m(
@@ -472,7 +472,7 @@ vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64_m(
@@ -481,7 +481,7 @@ vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m(
@@ -490,7 +490,7 @@ vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32_m(
@@ -499,7 +499,7 @@ vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m(
@@ -508,7 +508,7 @@ vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16_m(
@@ -517,7 +517,7 @@ vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m(
@@ -526,7 +526,7 @@ vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8_m(
@@ -535,7 +535,7 @@ vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m(
@@ -544,6 +544,6 @@ vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfge(mask, op1, op2, vl);
+ return __riscv_vmfge(mask, op1, op2, vl);
}
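For context, here is a minimal sketch (not part of the diff) of how the renamed overloaded comparison reads in user code after this change. It assumes <riscv_vector.h> on a V-enabled target (e.g. -march=rv64gcv); the helper name count_ge is hypothetical.

#include <riscv_vector.h>
#include <stddef.h>

/* Count how many elements of a[] are >= the matching elements of b[],
   strip-mining the loop with vsetvl. Hypothetical helper for illustration. */
size_t count_ge(const float *a, const float *b, size_t n) {
  size_t total = 0;
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vfloat32m1_t va = __riscv_vle32_v_f32m1(a + i, vl);
    vfloat32m1_t vb = __riscv_vle32_v_f32m1(b + i, vl);
    vbool32_t ge = __riscv_vmfge(va, vb, vl); /* overloaded form, new prefix */
    total += __riscv_vcpop(ge, vl);           /* overloaded mask popcount */
    i += vl;
  }
  return total;
}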
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt.c
index 5c9f99ecb0f7..70fa40e65cc6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmfgt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmfgt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmfgt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmfgt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmfgt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmfgt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmfgt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmfgt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmfgt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmfgt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfgt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmfgt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfgt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64(
@@ -121,7 +121,7 @@ vbool2_t test_vmfgt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64(
@@ -130,7 +130,7 @@ vbool64_t test_vmfgt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32(
@@ -139,7 +139,7 @@ vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32(
@@ -148,7 +148,7 @@ vbool32_t test_vmfgt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16(
@@ -157,7 +157,7 @@ vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16(
@@ -166,7 +166,7 @@ vbool16_t test_vmfgt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8(
@@ -175,7 +175,7 @@ vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8(
@@ -184,7 +184,7 @@ vbool8_t test_vmfgt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4(
@@ -193,7 +193,7 @@ vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4(
@@ -202,7 +202,7 @@ vbool4_t test_vmfgt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64(
@@ -211,7 +211,7 @@ vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64(
@@ -220,7 +220,7 @@ vbool64_t test_vmfgt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32(
@@ -229,7 +229,7 @@ vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32(
@@ -238,7 +238,7 @@ vbool32_t test_vmfgt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16(
@@ -247,7 +247,7 @@ vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16(
@@ -256,7 +256,7 @@ vbool16_t test_vmfgt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8(
@@ -265,7 +265,7 @@ vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8(
@@ -274,7 +274,7 @@ vbool8_t test_vmfgt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
- return vmfgt(op1, op2, vl);
+ return __riscv_vmfgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64_m(
@@ -283,7 +283,7 @@ vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_m(
@@ -292,7 +292,7 @@ vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_m(
@@ -301,7 +301,7 @@ vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_m(
@@ -310,7 +310,7 @@ vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_m(
@@ -319,7 +319,7 @@ vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16_m(
@@ -328,7 +328,7 @@ vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_m(
@@ -337,7 +337,7 @@ vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_m(
@@ -346,7 +346,7 @@ vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_m(
@@ -355,7 +355,7 @@ vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_m(
@@ -364,7 +364,7 @@ vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_m(
@@ -373,7 +373,7 @@ vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_m(
@@ -382,7 +382,7 @@ vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_m(
@@ -391,7 +391,7 @@ vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m(
@@ -400,7 +400,7 @@ vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32_m(
@@ -409,7 +409,7 @@ vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m(
@@ -418,7 +418,7 @@ vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16_m(
@@ -427,7 +427,7 @@ vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m(
@@ -436,7 +436,7 @@ vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8_m(
@@ -445,7 +445,7 @@ vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m(
@@ -454,7 +454,7 @@ vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4_m(
@@ -463,7 +463,7 @@ vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m(
@@ -472,7 +472,7 @@ vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64_m(
@@ -481,7 +481,7 @@ vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m(
@@ -490,7 +490,7 @@ vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32_m(
@@ -499,7 +499,7 @@ vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m(
@@ -508,7 +508,7 @@ vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16_m(
@@ -517,7 +517,7 @@ vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m(
@@ -526,7 +526,7 @@ vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8_m(
@@ -535,7 +535,7 @@ vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m(
@@ -544,6 +544,6 @@ vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfgt(mask, op1, op2, vl);
+ return __riscv_vmfgt(mask, op1, op2, vl);
}
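As the _m tests above show, the masked overloads keep the same call shape, with the mask as the leading argument, and overload resolution still distinguishes the vector-vector and vector-scalar forms. A small sketch (not part of the diff; the helper name flag_gt_masked is hypothetical), assuming <riscv_vector.h>:

#include <riscv_vector.h>

/* Within the lanes selected by `active`, flag those where x > limit.
   The scalar second operand selects the vf (vector-scalar) masked form. */
vbool64_t flag_gt_masked(vbool64_t active, vfloat64m1_t x, double limit,
                         size_t vl) {
  return __riscv_vmfgt(active, x, limit, vl);
}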
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle.c
index 0428940e1b31..a3c8b79dbb00 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmfle_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmfle_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmfle_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmfle_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmfle_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmfle_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmfle_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmfle_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmfle_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmfle_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfle_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmfle_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfle_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64(
@@ -121,7 +121,7 @@ vbool2_t test_vmfle_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64(
@@ -130,7 +130,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32(
@@ -139,7 +139,7 @@ vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32(
@@ -148,7 +148,7 @@ vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16(
@@ -157,7 +157,7 @@ vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16(
@@ -166,7 +166,7 @@ vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8(
@@ -175,7 +175,7 @@ vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8(
@@ -184,7 +184,7 @@ vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4(
@@ -193,7 +193,7 @@ vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4(
@@ -202,7 +202,7 @@ vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64(
@@ -211,7 +211,7 @@ vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64(
@@ -220,7 +220,7 @@ vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32(
@@ -229,7 +229,7 @@ vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32(
@@ -238,7 +238,7 @@ vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16(
@@ -247,7 +247,7 @@ vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16(
@@ -256,7 +256,7 @@ vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8(
@@ -265,7 +265,7 @@ vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8(
@@ -274,7 +274,7 @@ vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
- return vmfle(op1, op2, vl);
+ return __riscv_vmfle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64_m(
@@ -283,7 +283,7 @@ vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_m(
@@ -292,7 +292,7 @@ vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32_m(
@@ -301,7 +301,7 @@ vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_m(
@@ -310,7 +310,7 @@ vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_m(
@@ -319,7 +319,7 @@ vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_m(
@@ -328,7 +328,7 @@ vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8_m(
@@ -337,7 +337,7 @@ vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_m(
@@ -346,7 +346,7 @@ vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_m(
@@ -355,7 +355,7 @@ vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_m(
@@ -364,7 +364,7 @@ vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_m(
@@ -373,7 +373,7 @@ vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_m(
@@ -382,7 +382,7 @@ vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m(
@@ -391,7 +391,7 @@ vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m(
@@ -400,7 +400,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m(
@@ -409,7 +409,7 @@ vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m(
@@ -418,7 +418,7 @@ vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m(
@@ -427,7 +427,7 @@ vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m(
@@ -436,7 +436,7 @@ vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m(
@@ -445,7 +445,7 @@ vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m(
@@ -454,7 +454,7 @@ vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m(
@@ -463,7 +463,7 @@ vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m(
@@ -472,7 +472,7 @@ vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m(
@@ -481,7 +481,7 @@ vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m(
@@ -490,7 +490,7 @@ vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m(
@@ -499,7 +499,7 @@ vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m(
@@ -508,7 +508,7 @@ vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m(
@@ -517,7 +517,7 @@ vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m(
@@ -526,7 +526,7 @@ vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m(
@@ -535,7 +535,7 @@ vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m(
@@ -544,6 +544,6 @@ vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfle(mask, op1, op2, vl);
+ return __riscv_vmfle(mask, op1, op2, vl);
}
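The renamed comparisons still compose through mask registers as before. A sketch of a range test lo <= x <= hi (not part of the diff; in_range is a hypothetical helper, and the mask AND is written with the explicit __riscv_vmand_mm_b32 spelling), assuming <riscv_vector.h>:

#include <riscv_vector.h>

/* True in each lane where lo <= x <= hi. */
vbool32_t in_range(vfloat32m1_t x, float lo, float hi, size_t vl) {
  vbool32_t ge_lo = __riscv_vmfge(x, lo, vl); /* x >= lo */
  vbool32_t le_hi = __riscv_vmfle(x, hi, vl); /* x <= hi */
  return __riscv_vmand_mm_b32(ge_lo, le_hi, vl);
}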
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt.c
index 4d31621760d3..ad49d666abc8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64(
@@ -121,7 +121,7 @@ vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64(
@@ -130,7 +130,7 @@ vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32(
@@ -139,7 +139,7 @@ vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32(
@@ -148,7 +148,7 @@ vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16(
@@ -157,7 +157,7 @@ vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16(
@@ -166,7 +166,7 @@ vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8(
@@ -175,7 +175,7 @@ vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8(
@@ -184,7 +184,7 @@ vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4(
@@ -193,7 +193,7 @@ vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4(
@@ -202,7 +202,7 @@ vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64(
@@ -211,7 +211,7 @@ vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64(
@@ -220,7 +220,7 @@ vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32(
@@ -229,7 +229,7 @@ vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32(
@@ -238,7 +238,7 @@ vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16(
@@ -247,7 +247,7 @@ vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16(
@@ -256,7 +256,7 @@ vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8(
@@ -265,7 +265,7 @@ vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8(
@@ -274,7 +274,7 @@ vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
- return vmflt(op1, op2, vl);
+ return __riscv_vmflt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64_m(
@@ -283,7 +283,7 @@ vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_m(
@@ -292,7 +292,7 @@ vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_m(
@@ -301,7 +301,7 @@ vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_m(
@@ -310,7 +310,7 @@ vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_m(
@@ -319,7 +319,7 @@ vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_m(
@@ -328,7 +328,7 @@ vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_m(
@@ -337,7 +337,7 @@ vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8_m(
@@ -346,7 +346,7 @@ vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4_m(
@@ -355,7 +355,7 @@ vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_m(
@@ -364,7 +364,7 @@ vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_m(
@@ -373,7 +373,7 @@ vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_m(
@@ -382,7 +382,7 @@ vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m(
@@ -391,7 +391,7 @@ vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m(
@@ -400,7 +400,7 @@ vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m(
@@ -409,7 +409,7 @@ vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m(
@@ -418,7 +418,7 @@ vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m(
@@ -427,7 +427,7 @@ vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m(
@@ -436,7 +436,7 @@ vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m(
@@ -445,7 +445,7 @@ vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m(
@@ -454,7 +454,7 @@ vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m(
@@ -463,7 +463,7 @@ vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m(
@@ -472,7 +472,7 @@ vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m(
@@ -481,7 +481,7 @@ vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m(
@@ -490,7 +490,7 @@ vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m(
@@ -499,7 +499,7 @@ vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m(
@@ -508,7 +508,7 @@ vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m(
@@ -517,7 +517,7 @@ vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m(
@@ -526,7 +526,7 @@ vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m(
@@ -535,7 +535,7 @@ vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m(
@@ -544,6 +544,6 @@ vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vmflt(mask, op1, op2, vl);
+ return __riscv_vmflt(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne.c
index 9c7f8730f8a4..f5c94520c980 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmfne_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmfne_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64(
@@ -121,7 +121,7 @@ vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64(
@@ -130,7 +130,7 @@ vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32(
@@ -139,7 +139,7 @@ vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32(
@@ -148,7 +148,7 @@ vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16(
@@ -157,7 +157,7 @@ vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16(
@@ -166,7 +166,7 @@ vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8(
@@ -175,7 +175,7 @@ vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8(
@@ -184,7 +184,7 @@ vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4(
@@ -193,7 +193,7 @@ vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4(
@@ -202,7 +202,7 @@ vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64(
@@ -211,7 +211,7 @@ vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64(
@@ -220,7 +220,7 @@ vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32(
@@ -229,7 +229,7 @@ vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32(
@@ -238,7 +238,7 @@ vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16(
@@ -247,7 +247,7 @@ vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16(
@@ -256,7 +256,7 @@ vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8(
@@ -265,7 +265,7 @@ vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8(
@@ -274,7 +274,7 @@ vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
- return vmfne(op1, op2, vl);
+ return __riscv_vmfne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64_m(
@@ -283,7 +283,7 @@ vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_m(
@@ -292,7 +292,7 @@ vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_m(
@@ -301,7 +301,7 @@ vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_m(
@@ -310,7 +310,7 @@ vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_m(
@@ -319,7 +319,7 @@ vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_m(
@@ -328,7 +328,7 @@ vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_m(
@@ -337,7 +337,7 @@ vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_m(
@@ -346,7 +346,7 @@ vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_m(
@@ -355,7 +355,7 @@ vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4_m(
@@ -364,7 +364,7 @@ vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_m(
@@ -373,7 +373,7 @@ vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_m(
@@ -382,7 +382,7 @@ vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m(
@@ -391,7 +391,7 @@ vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m(
@@ -400,7 +400,7 @@ vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m(
@@ -409,7 +409,7 @@ vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m(
@@ -418,7 +418,7 @@ vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m(
@@ -427,7 +427,7 @@ vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m(
@@ -436,7 +436,7 @@ vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m(
@@ -445,7 +445,7 @@ vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m(
@@ -454,7 +454,7 @@ vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m(
@@ -463,7 +463,7 @@ vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m(
@@ -472,7 +472,7 @@ vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m(
@@ -481,7 +481,7 @@ vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m(
@@ -490,7 +490,7 @@ vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m(
@@ -499,7 +499,7 @@ vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m(
@@ -508,7 +508,7 @@ vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m(
@@ -517,7 +517,7 @@ vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m(
@@ -526,7 +526,7 @@ vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m(
@@ -535,7 +535,7 @@ vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m(
@@ -544,6 +544,6 @@ vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfne(mask, op1, op2, vl);
+ return __riscv_vmfne(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmin.c
index c15cae7edc4a..97968624f973 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmin.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmin(op1, op2, vl);
+ return __riscv_vmin(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmin(mask, op1, op2, vl);
+ return __riscv_vmin(mask, op1, op2, vl);
}
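
End of the vmin.c hunks: only the spelling of each call site changed, with arguments and overload resolution untouched. As a rough, hypothetical illustration of the renamed overloaded API in user code — not part of this patch — the strip-mined loop below assumes a V-extension toolchain with <riscv_vector.h>, and that __riscv_vsetvl_e32m1, __riscv_vle32_v_i32m1, and __riscv_vse32_v_i32m1 are the prefixed non-overloaded names adopted by the earlier patches in this series.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Cap every element of src at `limit` using the renamed overloaded vmin.
// Strip-mined: each iteration processes vl elements.
void cap_i32(int32_t *dst, const int32_t *src, int32_t limit, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);            // lanes this trip
    vint32m1_t v = __riscv_vle32_v_i32m1(src + i, vl);  // unit-stride load
    v = __riscv_vmin(v, limit, vl);   // overloaded vx form, as tested above
    __riscv_vse32_v_i32m1(dst + i, v, vl);              // unit-stride store
    i += vl;
  }
}

The overloaded __riscv_vmin resolves to the i32m1 vx variant purely from its operand types — exactly the resolution the test functions above pin down.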
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vminu.c
index 5541b2b68185..b440eabf6164 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vminu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vminu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vminu(op1, op2, vl);
+ return __riscv_vminu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vminu(mask, op1, op2, vl);
+ return __riscv_vminu(mask, op1, op2, vl);
}
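
The vminu.c hunks repeat the same rename across every unsigned SEW/LMUL combination, while the overloaded spelling stays a single name. A minimal sketch (hypothetical, assuming only <riscv_vector.h>) of what that buys user code: the same __riscv_vminu call resolves to different variants purely from the operand types.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// One overloaded name serves every SEW/LMUL combination; the compiler
// picks the variant from the operand types, as the tests above exercise.
vuint8m1_t cap_u8(vuint8m1_t v, uint8_t limit, size_t vl) {
  return __riscv_vminu(v, limit, vl);   // resolves to the u8m1 vx variant
}
vuint32m4_t cap_u32(vuint32m4_t v, vuint32m4_t limits, size_t vl) {
  return __riscv_vminu(v, limits, vl);  // resolves to the u32m4 vv variant
}

Signedness, by contrast, stays in the name itself: __riscv_vmin for signed element types, __riscv_vminu for unsigned.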
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmmv.c
index 62bccfbe25cd..255816be4e01 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmmv.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmmv_m_b1(vbool1_t op1, size_t vl) {
- return vmmv(op1, vl);
+ return __riscv_vmmv(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmmv_m_b2(
@@ -22,7 +22,7 @@ vbool1_t test_vmmv_m_b1(vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmmv_m_b2(vbool2_t op1, size_t vl) {
- return vmmv(op1, vl);
+ return __riscv_vmmv(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmmv_m_b4(
@@ -31,7 +31,7 @@ vbool2_t test_vmmv_m_b2(vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmmv_m_b4(vbool4_t op1, size_t vl) {
- return vmmv(op1, vl);
+ return __riscv_vmmv(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmmv_m_b8(
@@ -40,7 +40,7 @@ vbool4_t test_vmmv_m_b4(vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmmv_m_b8(vbool8_t op1, size_t vl) {
- return vmmv(op1, vl);
+ return __riscv_vmmv(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmmv_m_b16(
@@ -49,7 +49,7 @@ vbool8_t test_vmmv_m_b8(vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmmv_m_b16(vbool16_t op1, size_t vl) {
- return vmmv(op1, vl);
+ return __riscv_vmmv(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmmv_m_b32(
@@ -58,7 +58,7 @@ vbool16_t test_vmmv_m_b16(vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmmv_m_b32(vbool32_t op1, size_t vl) {
- return vmmv(op1, vl);
+ return __riscv_vmmv(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmmv_m_b64(
@@ -67,6 +67,6 @@ vbool32_t test_vmmv_m_b32(vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmmv_m_b64(vbool64_t op1, size_t vl) {
- return vmmv(op1, vl);
+ return __riscv_vmmv(op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnand.c
index 103cce56eef4..6d71511c34e8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnand.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmnand(op1, op2, vl);
+ return __riscv_vmnand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnand_mm_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmnand(op1, op2, vl);
+ return __riscv_vmnand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnand_mm_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmnand(op1, op2, vl);
+ return __riscv_vmnand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnand_mm_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmnand(op1, op2, vl);
+ return __riscv_vmnand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnand_mm_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmnand(op1, op2, vl);
+ return __riscv_vmnand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnand_mm_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmnand(op1, op2, vl);
+ return __riscv_vmnand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnand_mm_b64(
@@ -66,6 +66,6 @@ vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmnand(op1, op2, vl);
+ return __riscv_vmnand(op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnor.c
index 1c9db03fbb5b..fd5a6a6bc26f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmnor(op1, op2, vl);
+ return __riscv_vmnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnor_mm_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmnor(op1, op2, vl);
+ return __riscv_vmnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnor_mm_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmnor(op1, op2, vl);
+ return __riscv_vmnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnor_mm_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmnor(op1, op2, vl);
+ return __riscv_vmnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnor_mm_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmnor(op1, op2, vl);
+ return __riscv_vmnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnor_mm_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmnor(op1, op2, vl);
+ return __riscv_vmnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmnor_mm_b64(
@@ -66,6 +66,6 @@ vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmnor(op1, op2, vl);
+ return __riscv_vmnor(op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnot.c
index c7fad85ac4bd..102654876148 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmnot.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmnot_m_b1(vbool1_t op1, size_t vl) {
- return vmnot(op1, vl);
+ return __riscv_vmnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmnot_m_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmnot_m_b1(vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmnot_m_b2(vbool2_t op1, size_t vl) {
- return vmnot(op1, vl);
+ return __riscv_vmnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmnot_m_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmnot_m_b2(vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmnot_m_b4(vbool4_t op1, size_t vl) {
- return vmnot(op1, vl);
+ return __riscv_vmnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmnot_m_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmnot_m_b4(vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmnot_m_b8(vbool8_t op1, size_t vl) {
- return vmnot(op1, vl);
+ return __riscv_vmnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmnot_m_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmnot_m_b8(vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmnot_m_b16(vbool16_t op1, size_t vl) {
- return vmnot(op1, vl);
+ return __riscv_vmnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmnot_m_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmnot_m_b16(vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmnot_m_b32(vbool32_t op1, size_t vl) {
- return vmnot(op1, vl);
+ return __riscv_vmnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmnot_m_b64(
@@ -66,6 +66,6 @@ vbool32_t test_vmnot_m_b32(vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmnot_m_b64(vbool64_t op1, size_t vl) {
- return vmnot(op1, vl);
+ return __riscv_vmnot(op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmor.c
index bce0202ec155..fe1a463e3e25 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmor(op1, op2, vl);
+ return __riscv_vmor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmor_mm_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmor(op1, op2, vl);
+ return __riscv_vmor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmor_mm_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmor(op1, op2, vl);
+ return __riscv_vmor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmor_mm_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmor(op1, op2, vl);
+ return __riscv_vmor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmor_mm_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmor(op1, op2, vl);
+ return __riscv_vmor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmor_mm_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmor(op1, op2, vl);
+ return __riscv_vmor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmor_mm_b64(
@@ -66,6 +66,6 @@ vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmor(op1, op2, vl);
+ return __riscv_vmor(op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmorn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmorn.c
index ebac000a1650..f92ca7cac344 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmorn.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmorn.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmorn(op1, op2, vl);
+ return __riscv_vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmorn_mm_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmorn(op1, op2, vl);
+ return __riscv_vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmorn_mm_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmorn(op1, op2, vl);
+ return __riscv_vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmorn_mm_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmorn(op1, op2, vl);
+ return __riscv_vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmorn_mm_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmorn(op1, op2, vl);
+ return __riscv_vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmorn_mm_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmorn(op1, op2, vl);
+ return __riscv_vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmorn_mm_b64(
@@ -66,6 +66,6 @@ vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmorn(op1, op2, vl);
+ return __riscv_vmorn(op1, op2, vl);
}
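
The mask-register diffs (vmmv through vmorn above) all follow the same one-line pattern; composing the renamed mask ops with a masked arithmetic op from earlier in the patch gives a feel for the full renamed surface. The sketch below is hypothetical and not from the patch: it additionally assumes the overloaded compare __riscv_vmslt received the same prefix in this series, and it makes no claim about inactive lanes, which the non-policy masked form leaves unspecified.

#include <riscv_vector.h>
#include <stddef.h>

// Min of a and b, but only on lanes where a < b and a < c both hold,
// or where `override` is set. Exercises the renamed mask ops above.
vint32m1_t min_where(vint32m1_t a, vint32m1_t b, vint32m1_t c,
                     vbool32_t override, size_t vl) {
  vbool32_t lt_b = __riscv_vmslt(a, b, vl);   // assumed prefixed compare
  vbool32_t lt_c = __riscv_vmslt(a, c, vl);
  // vmnot(vmnand(x, y)) == x AND y; contrived, but it exercises both
  // renamed ops from the vmnand.c and vmnot.c hunks.
  vbool32_t both = __riscv_vmnot(__riscv_vmnand(lt_b, lt_c, vl), vl);
  vbool32_t m = __riscv_vmor(both, override, vl);
  // Non-policy masked form, as in the vmin.c hunks; inactive lanes of
  // the result are unspecified here.
  return __riscv_vmin(m, a, b, vl);
}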
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbc.c
index 2e1060a527dc..02eb1d735d7e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbc.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf8_b64(
@@ -21,7 +21,7 @@ vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t bor
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf8_b64(
@@ -30,7 +30,7 @@ vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf8_b64(
@@ -39,7 +39,7 @@ vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf4_b32(
@@ -48,7 +48,7 @@ vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf4_b32(
@@ -57,7 +57,7 @@ vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t bor
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf4_b32(
@@ -66,7 +66,7 @@ vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf4_b32(
@@ -75,7 +75,7 @@ vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf2_b16(
@@ -84,7 +84,7 @@ vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf2_b16(
@@ -93,7 +93,7 @@ vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t bor
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf2_b16(
@@ -102,7 +102,7 @@ vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf2_b16(
@@ -111,7 +111,7 @@ vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m1_b8(
@@ -120,7 +120,7 @@ vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m1_b8(
@@ -129,7 +129,7 @@ vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m1_b8(
@@ -138,7 +138,7 @@ vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m1_b8(
@@ -147,7 +147,7 @@ vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m2_b4(
@@ -156,7 +156,7 @@ vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m2_b4(
@@ -165,7 +165,7 @@ vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m2_b4(
@@ -174,7 +174,7 @@ vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m2_b4(
@@ -183,7 +183,7 @@ vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m4_b2(
@@ -192,7 +192,7 @@ vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m4_b2(
@@ -201,7 +201,7 @@ vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m4_b2(
@@ -210,7 +210,7 @@ vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m4_b2(
@@ -219,7 +219,7 @@ vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m8_b1(
@@ -228,7 +228,7 @@ vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m8_b1(
@@ -237,7 +237,7 @@ vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m8_b1(
@@ -246,7 +246,7 @@ vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m8_b1(
@@ -255,7 +255,7 @@ vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf4_b64(
@@ -264,7 +264,7 @@ vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf4_b64(
@@ -273,7 +273,7 @@ vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf4_b64(
@@ -282,7 +282,7 @@ vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t borr
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf4_b64(
@@ -291,7 +291,7 @@ vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf2_b32(
@@ -300,7 +300,7 @@ vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf2_b32(
@@ -309,7 +309,7 @@ vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf2_b32(
@@ -318,7 +318,7 @@ vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t borr
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf2_b32(
@@ -327,7 +327,7 @@ vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m1_b16(
@@ -336,7 +336,7 @@ vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m1_b16(
@@ -345,7 +345,7 @@ vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t bor
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m1_b16(
@@ -354,7 +354,7 @@ vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t borrow
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m1_b16(
@@ -363,7 +363,7 @@ vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m2_b8(
@@ -372,7 +372,7 @@ vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m2_b8(
@@ -381,7 +381,7 @@ vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t borrow
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m2_b8(
@@ -390,7 +390,7 @@ vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m2_b8(
@@ -399,7 +399,7 @@ vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m4_b4(
@@ -408,7 +408,7 @@ vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m4_b4(
@@ -417,7 +417,7 @@ vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrow
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m4_b4(
@@ -426,7 +426,7 @@ vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m4_b4(
@@ -435,7 +435,7 @@ vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m8_b2(
@@ -444,7 +444,7 @@ vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m8_b2(
@@ -453,7 +453,7 @@ vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t borrow
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m8_b2(
@@ -462,7 +462,7 @@ vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m8_b2(
@@ -471,7 +471,7 @@ vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32mf2_b64(
@@ -480,7 +480,7 @@ vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32mf2_b64(
@@ -489,7 +489,7 @@ vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i32mf2_b64(
@@ -498,7 +498,7 @@ vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t borr
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i32mf2_b64(
@@ -507,7 +507,7 @@ vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m1_b32(
@@ -516,7 +516,7 @@ vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m1_b32(
@@ -525,7 +525,7 @@ vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t bor
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m1_b32(
@@ -534,7 +534,7 @@ vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t borrow
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m1_b32(
@@ -543,7 +543,7 @@ vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m2_b16(
@@ -552,7 +552,7 @@ vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m2_b16(
@@ -561,7 +561,7 @@ vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t bor
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m2_b16(
@@ -570,7 +570,7 @@ vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t borrow
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m2_b16(
@@ -579,7 +579,7 @@ vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m4_b8(
@@ -588,7 +588,7 @@ vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m4_b8(
@@ -597,7 +597,7 @@ vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t borrow
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m4_b8(
@@ -606,7 +606,7 @@ vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m4_b8(
@@ -615,7 +615,7 @@ vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m8_b4(
@@ -624,7 +624,7 @@ vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m8_b4(
@@ -633,7 +633,7 @@ vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t borrow
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m8_b4(
@@ -642,7 +642,7 @@ vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m8_b4(
@@ -651,7 +651,7 @@ vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m1_b64(
@@ -660,7 +660,7 @@ vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m1_b64(
@@ -669,7 +669,7 @@ vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t bor
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m1_b64(
@@ -678,7 +678,7 @@ vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t borrow
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m1_b64(
@@ -687,7 +687,7 @@ vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m2_b32(
@@ -696,7 +696,7 @@ vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m2_b32(
@@ -705,7 +705,7 @@ vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t bor
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m2_b32(
@@ -714,7 +714,7 @@ vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t borrow
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m2_b32(
@@ -723,7 +723,7 @@ vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m4_b16(
@@ -732,7 +732,7 @@ vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m4_b16(
@@ -741,7 +741,7 @@ vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t bor
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m4_b16(
@@ -750,7 +750,7 @@ vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t borrow
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m4_b16(
@@ -759,7 +759,7 @@ vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m8_b8(
@@ -768,7 +768,7 @@ vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m8_b8(
@@ -777,7 +777,7 @@ vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrow
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m8_b8(
@@ -786,7 +786,7 @@ vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m8_b8(
@@ -795,7 +795,7 @@ vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf8_b64(
@@ -804,7 +804,7 @@ vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf8_b64(
@@ -813,7 +813,7 @@ vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t b
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf8_b64(
@@ -822,7 +822,7 @@ vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t borro
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf8_b64(
@@ -831,7 +831,7 @@ vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf4_b32(
@@ -840,7 +840,7 @@ vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf4_b32(
@@ -849,7 +849,7 @@ vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t b
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf4_b32(
@@ -858,7 +858,7 @@ vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t borro
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf4_b32(
@@ -867,7 +867,7 @@ vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf2_b16(
@@ -876,7 +876,7 @@ vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf2_b16(
@@ -885,7 +885,7 @@ vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t b
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf2_b16(
@@ -894,7 +894,7 @@ vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t borro
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf2_b16(
@@ -903,7 +903,7 @@ vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m1_b8(
@@ -912,7 +912,7 @@ vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m1_b8(
@@ -921,7 +921,7 @@ vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m1_b8(
@@ -930,7 +930,7 @@ vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m1_b8(
@@ -939,7 +939,7 @@ vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m2_b4(
@@ -948,7 +948,7 @@ vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m2_b4(
@@ -957,7 +957,7 @@ vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m2_b4(
@@ -966,7 +966,7 @@ vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m2_b4(
@@ -975,7 +975,7 @@ vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m4_b2(
@@ -984,7 +984,7 @@ vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m4_b2(
@@ -993,7 +993,7 @@ vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m4_b2(
@@ -1002,7 +1002,7 @@ vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m4_b2(
@@ -1011,7 +1011,7 @@ vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m8_b1(
@@ -1020,7 +1020,7 @@ vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m8_b1(
@@ -1029,7 +1029,7 @@ vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m8_b1(
@@ -1038,7 +1038,7 @@ vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m8_b1(
@@ -1047,7 +1047,7 @@ vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf4_b64(
@@ -1056,7 +1056,7 @@ vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf4_b64(
@@ -1065,7 +1065,7 @@ vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf4_b64(
@@ -1074,7 +1074,7 @@ vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t bo
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf4_b64(
@@ -1083,7 +1083,7 @@ vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf2_b32(
@@ -1092,7 +1092,7 @@ vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf2_b32(
@@ -1101,7 +1101,7 @@ vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf2_b32(
@@ -1110,7 +1110,7 @@ vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t bo
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf2_b32(
@@ -1119,7 +1119,7 @@ vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m1_b16(
@@ -1128,7 +1128,7 @@ vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m1_b16(
@@ -1137,7 +1137,7 @@ vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t b
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m1_b16(
@@ -1146,7 +1146,7 @@ vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t borr
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m1_b16(
@@ -1155,7 +1155,7 @@ vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m2_b8(
@@ -1164,7 +1164,7 @@ vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m2_b8(
@@ -1173,7 +1173,7 @@ vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borr
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m2_b8(
@@ -1182,7 +1182,7 @@ vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m2_b8(
@@ -1191,7 +1191,7 @@ vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m4_b4(
@@ -1200,7 +1200,7 @@ vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m4_b4(
@@ -1209,7 +1209,7 @@ vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borr
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m4_b4(
@@ -1218,7 +1218,7 @@ vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m4_b4(
@@ -1227,7 +1227,7 @@ vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m8_b2(
@@ -1236,7 +1236,7 @@ vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m8_b2(
@@ -1245,7 +1245,7 @@ vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borr
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m8_b2(
@@ -1254,7 +1254,7 @@ vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m8_b2(
@@ -1263,7 +1263,7 @@ vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32mf2_b64(
@@ -1272,7 +1272,7 @@ vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32mf2_b64(
@@ -1281,7 +1281,7 @@ vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u32mf2_b64(
@@ -1290,7 +1290,7 @@ vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t bo
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u32mf2_b64(
@@ -1299,7 +1299,7 @@ vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m1_b32(
@@ -1308,7 +1308,7 @@ vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m1_b32(
@@ -1317,7 +1317,7 @@ vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t b
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m1_b32(
@@ -1326,7 +1326,7 @@ vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t borr
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m1_b32(
@@ -1335,7 +1335,7 @@ vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m2_b16(
@@ -1344,7 +1344,7 @@ vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m2_b16(
@@ -1353,7 +1353,7 @@ vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t b
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m2_b16(
@@ -1362,7 +1362,7 @@ vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t borr
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m2_b16(
@@ -1371,7 +1371,7 @@ vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m4_b8(
@@ -1380,7 +1380,7 @@ vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m4_b8(
@@ -1389,7 +1389,7 @@ vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borr
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m4_b8(
@@ -1398,7 +1398,7 @@ vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m4_b8(
@@ -1407,7 +1407,7 @@ vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m8_b4(
@@ -1416,7 +1416,7 @@ vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m8_b4(
@@ -1425,7 +1425,7 @@ vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borr
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m8_b4(
@@ -1434,7 +1434,7 @@ vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m8_b4(
@@ -1443,7 +1443,7 @@ vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m1_b64(
@@ -1452,7 +1452,7 @@ vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m1_b64(
@@ -1461,7 +1461,7 @@ vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t b
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m1_b64(
@@ -1470,7 +1470,7 @@ vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t borr
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m1_b64(
@@ -1479,7 +1479,7 @@ vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m2_b32(
@@ -1488,7 +1488,7 @@ vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m2_b32(
@@ -1497,7 +1497,7 @@ vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t b
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m2_b32(
@@ -1506,7 +1506,7 @@ vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t borr
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m2_b32(
@@ -1515,7 +1515,7 @@ vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m4_b16(
@@ -1524,7 +1524,7 @@ vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m4_b16(
@@ -1533,7 +1533,7 @@ vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t b
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m4_b16(
@@ -1542,7 +1542,7 @@ vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t borr
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m4_b16(
@@ -1551,7 +1551,7 @@ vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m8_b8(
@@ -1560,7 +1560,7 @@ vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m8_b8(
@@ -1569,7 +1569,7 @@ vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borr
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) {
- return vmsbc(op1, op2, borrowin, vl);
+ return __riscv_vmsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m8_b8(
@@ -1578,7 +1578,7 @@ vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m8_b8(
@@ -1587,6 +1587,6 @@ vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsbc(op1, op2, vl);
+ return __riscv_vmsbc(op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbf.c
index 0efa60d71083..a1ea744dc121 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbf.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) {
- return vmsbf(op1, vl);
+ return __riscv_vmsbf(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) {
- return vmsbf(op1, vl);
+ return __riscv_vmsbf(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) {
- return vmsbf(op1, vl);
+ return __riscv_vmsbf(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) {
- return vmsbf(op1, vl);
+ return __riscv_vmsbf(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) {
- return vmsbf(op1, vl);
+ return __riscv_vmsbf(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) {
- return vmsbf(op1, vl);
+ return __riscv_vmsbf(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b64(
@@ -66,7 +66,7 @@ vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) {
- return vmsbf(op1, vl);
+ return __riscv_vmsbf(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
@@ -75,7 +75,7 @@ vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
- return vmsbf(mask, op1, vl);
+ return __riscv_vmsbf(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
@@ -84,7 +84,7 @@ vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
- return vmsbf(mask, op1, vl);
+ return __riscv_vmsbf(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
@@ -93,7 +93,7 @@ vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
- return vmsbf(mask, op1, vl);
+ return __riscv_vmsbf(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
@@ -102,7 +102,7 @@ vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
- return vmsbf(mask, op1, vl);
+ return __riscv_vmsbf(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
@@ -111,7 +111,7 @@ vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
- return vmsbf(mask, op1, vl);
+ return __riscv_vmsbf(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
@@ -120,7 +120,7 @@ vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
- return vmsbf(mask, op1, vl);
+ return __riscv_vmsbf(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b64_m(
@@ -129,6 +129,6 @@ vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
- return vmsbf(mask, op1, vl);
+ return __riscv_vmsbf(mask, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmseq.c
index 6c5b1ebadd98..79ace98244d7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmseq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmseq.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64(
@@ -409,7 +409,7 @@ vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64(
@@ -418,7 +418,7 @@ vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32(
@@ -427,7 +427,7 @@ vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32(
@@ -436,7 +436,7 @@ vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16(
@@ -445,7 +445,7 @@ vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16(
@@ -454,7 +454,7 @@ vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8(
@@ -463,7 +463,7 @@ vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8(
@@ -472,7 +472,7 @@ vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4(
@@ -481,7 +481,7 @@ vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4(
@@ -490,7 +490,7 @@ vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2(
@@ -499,7 +499,7 @@ vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2(
@@ -508,7 +508,7 @@ vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1(
@@ -517,7 +517,7 @@ vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1(
@@ -526,7 +526,7 @@ vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64(
@@ -535,7 +535,7 @@ vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64(
@@ -544,7 +544,7 @@ vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32(
@@ -553,7 +553,7 @@ vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32(
@@ -562,7 +562,7 @@ vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16(
@@ -571,7 +571,7 @@ vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16(
@@ -580,7 +580,7 @@ vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8(
@@ -589,7 +589,7 @@ vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8(
@@ -598,7 +598,7 @@ vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4(
@@ -607,7 +607,7 @@ vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4(
@@ -616,7 +616,7 @@ vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2(
@@ -625,7 +625,7 @@ vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2(
@@ -634,7 +634,7 @@ vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64(
@@ -643,7 +643,7 @@ vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64(
@@ -652,7 +652,7 @@ vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32(
@@ -661,7 +661,7 @@ vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32(
@@ -670,7 +670,7 @@ vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16(
@@ -679,7 +679,7 @@ vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16(
@@ -688,7 +688,7 @@ vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8(
@@ -697,7 +697,7 @@ vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8(
@@ -706,7 +706,7 @@ vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4(
@@ -715,7 +715,7 @@ vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4(
@@ -724,7 +724,7 @@ vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64(
@@ -733,7 +733,7 @@ vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64(
@@ -742,7 +742,7 @@ vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32(
@@ -751,7 +751,7 @@ vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32(
@@ -760,7 +760,7 @@ vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16(
@@ -769,7 +769,7 @@ vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16(
@@ -778,7 +778,7 @@ vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8(
@@ -787,7 +787,7 @@ vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8(
@@ -796,7 +796,7 @@ vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmseq(op1, op2, vl);
+ return __riscv_vmseq(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m(
@@ -805,7 +805,7 @@ vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m(
@@ -814,7 +814,7 @@ vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m(
@@ -823,7 +823,7 @@ vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m(
@@ -832,7 +832,7 @@ vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m(
@@ -841,7 +841,7 @@ vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m(
@@ -850,7 +850,7 @@ vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m(
@@ -859,7 +859,7 @@ vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m(
@@ -868,7 +868,7 @@ vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m(
@@ -877,7 +877,7 @@ vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m(
@@ -886,7 +886,7 @@ vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_m(
@@ -895,7 +895,7 @@ vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m(
@@ -904,7 +904,7 @@ vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m(
@@ -913,7 +913,7 @@ vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m(
@@ -922,7 +922,7 @@ vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m(
@@ -931,7 +931,7 @@ vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m(
@@ -940,7 +940,7 @@ vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m(
@@ -949,7 +949,7 @@ vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m(
@@ -958,7 +958,7 @@ vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m(
@@ -967,7 +967,7 @@ vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m(
@@ -976,7 +976,7 @@ vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m(
@@ -985,7 +985,7 @@ vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m(
@@ -994,7 +994,7 @@ vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m(
@@ -1003,7 +1003,7 @@ vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m(
@@ -1012,7 +1012,7 @@ vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m(
@@ -1021,7 +1021,7 @@ vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m(
@@ -1030,7 +1030,7 @@ vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m(
@@ -1039,7 +1039,7 @@ vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m(
@@ -1048,7 +1048,7 @@ vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m(
@@ -1057,7 +1057,7 @@ vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m(
@@ -1066,7 +1066,7 @@ vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m(
@@ -1075,7 +1075,7 @@ vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m(
@@ -1084,7 +1084,7 @@ vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m(
@@ -1093,7 +1093,7 @@ vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m(
@@ -1102,7 +1102,7 @@ vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m(
@@ -1111,7 +1111,7 @@ vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m(
@@ -1120,7 +1120,7 @@ vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m(
@@ -1129,7 +1129,7 @@ vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m(
@@ -1138,7 +1138,7 @@ vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m(
@@ -1147,7 +1147,7 @@ vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m(
@@ -1156,7 +1156,7 @@ vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m(
@@ -1165,7 +1165,7 @@ vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m(
@@ -1174,7 +1174,7 @@ vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m(
@@ -1183,7 +1183,7 @@ vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m(
@@ -1192,7 +1192,7 @@ vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m(
@@ -1201,7 +1201,7 @@ vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m(
@@ -1210,7 +1210,7 @@ vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m(
@@ -1219,7 +1219,7 @@ vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m(
@@ -1228,7 +1228,7 @@ vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m(
@@ -1237,7 +1237,7 @@ vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m(
@@ -1246,7 +1246,7 @@ vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m(
@@ -1255,7 +1255,7 @@ vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m(
@@ -1264,7 +1264,7 @@ vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m(
@@ -1273,7 +1273,7 @@ vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m(
@@ -1282,7 +1282,7 @@ vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m(
@@ -1291,7 +1291,7 @@ vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m(
@@ -1300,7 +1300,7 @@ vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m(
@@ -1309,7 +1309,7 @@ vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m(
@@ -1318,7 +1318,7 @@ vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m(
@@ -1327,7 +1327,7 @@ vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m(
@@ -1336,7 +1336,7 @@ vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m(
@@ -1345,7 +1345,7 @@ vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m(
@@ -1354,7 +1354,7 @@ vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m(
@@ -1363,7 +1363,7 @@ vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m(
@@ -1372,7 +1372,7 @@ vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m(
@@ -1381,7 +1381,7 @@ vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m(
@@ -1390,7 +1390,7 @@ vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m(
@@ -1399,7 +1399,7 @@ vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m(
@@ -1408,7 +1408,7 @@ vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m(
@@ -1417,7 +1417,7 @@ vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m(
@@ -1426,7 +1426,7 @@ vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m(
@@ -1435,7 +1435,7 @@ vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m(
@@ -1444,7 +1444,7 @@ vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m(
@@ -1453,7 +1453,7 @@ vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m(
@@ -1462,7 +1462,7 @@ vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m(
@@ -1471,7 +1471,7 @@ vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m(
@@ -1480,7 +1480,7 @@ vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m(
@@ -1489,7 +1489,7 @@ vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m(
@@ -1498,7 +1498,7 @@ vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m(
@@ -1507,7 +1507,7 @@ vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m(
@@ -1516,7 +1516,7 @@ vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m(
@@ -1525,7 +1525,7 @@ vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m(
@@ -1534,7 +1534,7 @@ vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m(
@@ -1543,7 +1543,7 @@ vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m(
@@ -1552,7 +1552,7 @@ vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m(
@@ -1561,7 +1561,7 @@ vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m(
@@ -1570,7 +1570,7 @@ vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m(
@@ -1579,7 +1579,7 @@ vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m(
@@ -1588,6 +1588,6 @@ vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmseq(mask, op1, op2, vl);
+ return __riscv_vmseq(mask, op1, op2, vl);
}
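(Illustrative sketch, not part of the patch: the helper name below is invented, but the types and the overloaded call mirror test_vmseq_vv_i32m1_b32_m above. It shows that only the `__riscv_` prefix changes; argument order and overload resolution on the operand types stay as before.)

#include <riscv_vector.h>

// Masked equality compare of two i32m1 vectors using the overloaded form:
// element type and LMUL are inferred from the operands, so the renamed
// spelling differs from the old one only by the `__riscv_` prefix.
vbool32_t cmp_eq_i32m1_masked(vbool32_t mask, vint32m1_t op1,
                              vint32m1_t op2, size_t vl) {
  return __riscv_vmseq(mask, op1, op2, vl);
}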
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsge.c
index f17b2363884d..615290465e9f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsge.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmsge_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmsge_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmsge_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmsge_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmsge_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmsge_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmsge_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmsge_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmsge_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmsge_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmsge_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmsge_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsge_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmsge_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsge_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmsge_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmsge_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmsge_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmsge_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmsge_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmsge_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmsge_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmsge_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmsge_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmsge_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmsge_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmsge_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmsge_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmsge_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmsge_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmsge_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmsge_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmsge_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmsge_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmsge_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmsge_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmsge_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmsge_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmsge_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmsge_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmsge_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmsge_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmsge_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmsge_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmsge_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsge(op1, op2, vl);
+ return __riscv_vmsge(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8mf8_b64_m(
@@ -409,7 +409,7 @@ vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64_m(
@@ -418,7 +418,7 @@ vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32_m(
@@ -427,7 +427,7 @@ vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32_m(
@@ -436,7 +436,7 @@ vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16_m(
@@ -445,7 +445,7 @@ vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16_m(
@@ -454,7 +454,7 @@ vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8_m(
@@ -463,7 +463,7 @@ vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8_m(
@@ -472,7 +472,7 @@ vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4_m(
@@ -481,7 +481,7 @@ vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4_m(
@@ -490,7 +490,7 @@ vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2_m(
@@ -499,7 +499,7 @@ vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2_m(
@@ -508,7 +508,7 @@ vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1_m(
@@ -517,7 +517,7 @@ vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1_m(
@@ -526,7 +526,7 @@ vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64_m(
@@ -535,7 +535,7 @@ vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64_m(
@@ -544,7 +544,7 @@ vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32_m(
@@ -553,7 +553,7 @@ vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32_m(
@@ -562,7 +562,7 @@ vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16_m(
@@ -571,7 +571,7 @@ vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16_m(
@@ -580,7 +580,7 @@ vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8_m(
@@ -589,7 +589,7 @@ vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8_m(
@@ -598,7 +598,7 @@ vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4_m(
@@ -607,7 +607,7 @@ vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4_m(
@@ -616,7 +616,7 @@ vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2_m(
@@ -625,7 +625,7 @@ vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2_m(
@@ -634,7 +634,7 @@ vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_m(
@@ -643,7 +643,7 @@ vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_m(
@@ -652,7 +652,7 @@ vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32_m(
@@ -661,7 +661,7 @@ vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32_m(
@@ -670,7 +670,7 @@ vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16_m(
@@ -679,7 +679,7 @@ vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16_m(
@@ -688,7 +688,7 @@ vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8_m(
@@ -697,7 +697,7 @@ vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8_m(
@@ -706,7 +706,7 @@ vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4_m(
@@ -715,7 +715,7 @@ vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4_m(
@@ -724,7 +724,7 @@ vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64_m(
@@ -733,7 +733,7 @@ vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64_m(
@@ -742,7 +742,7 @@ vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32_m(
@@ -751,7 +751,7 @@ vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32_m(
@@ -760,7 +760,7 @@ vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16_m(
@@ -769,7 +769,7 @@ vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16_m(
@@ -778,7 +778,7 @@ vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8_m(
@@ -787,7 +787,7 @@ vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8_m(
@@ -796,6 +796,6 @@ vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsge(mask, op1, op2, vl);
+ return __riscv_vmsge(mask, op1, op2, vl);
}
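(Another hedged sketch, not part of the patch: helper names are invented; the signatures mirror test_vmsge_vx_i8mf8_b64 above and test_vmsgeu_vx_u8mf8_b64 in the next file. Signed and unsigned greater-or-equal remain distinct intrinsics, each picking up the prefix.)

#include <riscv_vector.h>

// Signed >= resolves to __riscv_vmsge on vint8mf8_t operands; the unsigned
// counterpart resolves to __riscv_vmsgeu on vuint8mf8_t operands.
vbool64_t ge_signed(vint8mf8_t op1, int8_t op2, size_t vl) {
  return __riscv_vmsge(op1, op2, vl);
}

vbool64_t ge_unsigned(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmsgeu(op1, op2, vl);
}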
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgeu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgeu.c
index d92ba4bbb617..3d26bd07e1e6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgeu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgeu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmsgeu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsgeu(op1, op2, vl);
+ return __riscv_vmsgeu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf8_b64_m(
@@ -409,7 +409,7 @@ vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64_m(
@@ -418,7 +418,7 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32_m(
@@ -427,7 +427,7 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32_m(
@@ -436,7 +436,7 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16_m(
@@ -445,7 +445,7 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16_m(
@@ -454,7 +454,7 @@ vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8_m(
@@ -463,7 +463,7 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8_m(
@@ -472,7 +472,7 @@ vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4_m(
@@ -481,7 +481,7 @@ vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4_m(
@@ -490,7 +490,7 @@ vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2_m(
@@ -499,7 +499,7 @@ vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2_m(
@@ -508,7 +508,7 @@ vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1_m(
@@ -517,7 +517,7 @@ vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1_m(
@@ -526,7 +526,7 @@ vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64_m(
@@ -535,7 +535,7 @@ vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64_m(
@@ -544,7 +544,7 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32_m(
@@ -553,7 +553,7 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32_m(
@@ -562,7 +562,7 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16_m(
@@ -571,7 +571,7 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16_m(
@@ -580,7 +580,7 @@ vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8_m(
@@ -589,7 +589,7 @@ vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8_m(
@@ -598,7 +598,7 @@ vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4_m(
@@ -607,7 +607,7 @@ vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4_m(
@@ -616,7 +616,7 @@ vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2_m(
@@ -625,7 +625,7 @@ vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2_m(
@@ -634,7 +634,7 @@ vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_m(
@@ -643,7 +643,7 @@ vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_m(
@@ -652,7 +652,7 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32_m(
@@ -661,7 +661,7 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32_m(
@@ -670,7 +670,7 @@ vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16_m(
@@ -679,7 +679,7 @@ vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16_m(
@@ -688,7 +688,7 @@ vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8_m(
@@ -697,7 +697,7 @@ vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8_m(
@@ -706,7 +706,7 @@ vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4_m(
@@ -715,7 +715,7 @@ vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4_m(
@@ -724,7 +724,7 @@ vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64_m(
@@ -733,7 +733,7 @@ vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64_m(
@@ -742,7 +742,7 @@ vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32_m(
@@ -751,7 +751,7 @@ vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32_m(
@@ -760,7 +760,7 @@ vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16_m(
@@ -769,7 +769,7 @@ vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16_m(
@@ -778,7 +778,7 @@ vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8_m(
@@ -787,7 +787,7 @@ vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8_m(
@@ -796,6 +796,6 @@ vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsgeu(mask, op1, op2, vl);
+ return __riscv_vmsgeu(mask, op1, op2, vl);
}
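The vmsgeu.c updates above are purely mechanical: only the call spelling changes, while overload resolution on the operand types is untouched. As a minimal illustration (a hypothetical caller for this note, not part of the diff), user code migrates like so:

#include <riscv_vector.h>

// Unsigned >= comparison; the vv overload is selected from the vector operand types.
vbool8_t caller_vv(vuint8m1_t a, vuint8m1_t b, size_t vl) {
  return __riscv_vmsgeu(a, b, vl);  // previously spelled: vmsgeu(a, b, vl)
}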
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgt.c
index 406a800db34c..9439027f7b4d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmsgt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmsgt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmsgt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmsgt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmsgt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmsgt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmsgt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmsgt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmsgt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmsgt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmsgt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmsgt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmsgt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmsgt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmsgt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmsgt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmsgt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmsgt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmsgt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmsgt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmsgt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmsgt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsgt(op1, op2, vl);
+ return __riscv_vmsgt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf8_b64_m(
@@ -409,7 +409,7 @@ vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
@@ -418,7 +418,7 @@ vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32_m(
@@ -427,7 +427,7 @@ vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
@@ -436,7 +436,7 @@ vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16_m(
@@ -445,7 +445,7 @@ vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m(
@@ -454,7 +454,7 @@ vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8_m(
@@ -463,7 +463,7 @@ vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m(
@@ -472,7 +472,7 @@ vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4_m(
@@ -481,7 +481,7 @@ vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m(
@@ -490,7 +490,7 @@ vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2_m(
@@ -499,7 +499,7 @@ vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m(
@@ -508,7 +508,7 @@ vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1_m(
@@ -517,7 +517,7 @@ vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m(
@@ -526,7 +526,7 @@ vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64_m(
@@ -535,7 +535,7 @@ vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m(
@@ -544,7 +544,7 @@ vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32_m(
@@ -553,7 +553,7 @@ vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m(
@@ -562,7 +562,7 @@ vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16_m(
@@ -571,7 +571,7 @@ vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m(
@@ -580,7 +580,7 @@ vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8_m(
@@ -589,7 +589,7 @@ vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m(
@@ -598,7 +598,7 @@ vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4_m(
@@ -607,7 +607,7 @@ vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m(
@@ -616,7 +616,7 @@ vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2_m(
@@ -625,7 +625,7 @@ vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m(
@@ -634,7 +634,7 @@ vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_m(
@@ -643,7 +643,7 @@ vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m(
@@ -652,7 +652,7 @@ vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32_m(
@@ -661,7 +661,7 @@ vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m(
@@ -670,7 +670,7 @@ vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16_m(
@@ -679,7 +679,7 @@ vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m(
@@ -688,7 +688,7 @@ vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8_m(
@@ -697,7 +697,7 @@ vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m(
@@ -706,7 +706,7 @@ vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4_m(
@@ -715,7 +715,7 @@ vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m(
@@ -724,7 +724,7 @@ vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64_m(
@@ -733,7 +733,7 @@ vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m(
@@ -742,7 +742,7 @@ vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32_m(
@@ -751,7 +751,7 @@ vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m(
@@ -760,7 +760,7 @@ vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16_m(
@@ -769,7 +769,7 @@ vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m(
@@ -778,7 +778,7 @@ vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8_m(
@@ -787,7 +787,7 @@ vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m(
@@ -796,6 +796,6 @@ vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsgt(mask, op1, op2, vl);
+ return __riscv_vmsgt(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgtu.c
index 5eff53f3dbd5..6911617ad618 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgtu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgtu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmsgtu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmsgtu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmsgtu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgtu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmsgtu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmsgtu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmsgtu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmsgtu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmsgtu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmsgtu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmsgtu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmsgtu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmsgtu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmsgtu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmsgtu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmsgtu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmsgtu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsgtu(op1, op2, vl);
+ return __riscv_vmsgtu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf8_b64_m(
@@ -409,7 +409,7 @@ vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m(
@@ -418,7 +418,7 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32_m(
@@ -427,7 +427,7 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m(
@@ -436,7 +436,7 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16_m(
@@ -445,7 +445,7 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m(
@@ -454,7 +454,7 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8_m(
@@ -463,7 +463,7 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m(
@@ -472,7 +472,7 @@ vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4_m(
@@ -481,7 +481,7 @@ vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m(
@@ -490,7 +490,7 @@ vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2_m(
@@ -499,7 +499,7 @@ vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m(
@@ -508,7 +508,7 @@ vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1_m(
@@ -517,7 +517,7 @@ vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m(
@@ -526,7 +526,7 @@ vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64_m(
@@ -535,7 +535,7 @@ vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m(
@@ -544,7 +544,7 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32_m(
@@ -553,7 +553,7 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m(
@@ -562,7 +562,7 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16_m(
@@ -571,7 +571,7 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m(
@@ -580,7 +580,7 @@ vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8_m(
@@ -589,7 +589,7 @@ vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m(
@@ -598,7 +598,7 @@ vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4_m(
@@ -607,7 +607,7 @@ vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m(
@@ -616,7 +616,7 @@ vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2_m(
@@ -625,7 +625,7 @@ vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m(
@@ -634,7 +634,7 @@ vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_m(
@@ -643,7 +643,7 @@ vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m(
@@ -652,7 +652,7 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32_m(
@@ -661,7 +661,7 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m(
@@ -670,7 +670,7 @@ vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16_m(
@@ -679,7 +679,7 @@ vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m(
@@ -688,7 +688,7 @@ vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8_m(
@@ -697,7 +697,7 @@ vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m(
@@ -706,7 +706,7 @@ vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4_m(
@@ -715,7 +715,7 @@ vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m(
@@ -724,7 +724,7 @@ vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64_m(
@@ -733,7 +733,7 @@ vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m(
@@ -742,7 +742,7 @@ vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32_m(
@@ -751,7 +751,7 @@ vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m(
@@ -760,7 +760,7 @@ vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16_m(
@@ -769,7 +769,7 @@ vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
@@ -778,7 +778,7 @@ vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8_m(
@@ -787,7 +787,7 @@ vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
@@ -796,6 +796,6 @@ vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsgtu(mask, op1, op2, vl);
+ return __riscv_vmsgtu(mask, op1, op2, vl);
}
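
Every hunk in vmsgtu.c above applies the same mechanical rename, so the two call shapes — unmasked and masked — are easiest to see side by side in one translation unit. The following is a minimal sketch, not part of the patch: the wrapper names are invented for illustration, but the intrinsic signatures match the tests above verbatim.

```c
#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// Unmasked overload: result[i] = (op1[i] > bound), unsigned compare.
// Same shape as test_vmsgtu_vx_u8m1_b8 above.
vbool8_t above_bound(vuint8m1_t op1, uint8_t bound, size_t vl) {
  return __riscv_vmsgtu(op1, bound, vl);  // formerly vmsgtu(op1, bound, vl)
}

// Masked overload: the extra leading vbool8_t argument selects which
// lanes participate, as in test_vmsgtu_vx_u8m1_b8_m above.
vbool8_t above_bound_m(vbool8_t mask, vuint8m1_t op1, uint8_t bound,
                       size_t vl) {
  return __riscv_vmsgtu(mask, op1, bound, vl);
}
```

Note that the overload resolves on argument types alone: the caller never spells out the `_vx_u8m1_b8` suffix, which is exactly why only the prefix changes in these hunks.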
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsif.c
index 4aacc7455d17..ebe2a422e50e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsif.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) {
- return vmsif(op1, vl);
+ return __riscv_vmsif(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) {
- return vmsif(op1, vl);
+ return __riscv_vmsif(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) {
- return vmsif(op1, vl);
+ return __riscv_vmsif(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) {
- return vmsif(op1, vl);
+ return __riscv_vmsif(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) {
- return vmsif(op1, vl);
+ return __riscv_vmsif(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) {
- return vmsif(op1, vl);
+ return __riscv_vmsif(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b64(
@@ -66,7 +66,7 @@ vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) {
- return vmsif(op1, vl);
+ return __riscv_vmsif(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
@@ -75,7 +75,7 @@ vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
- return vmsif(mask, op1, vl);
+ return __riscv_vmsif(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
@@ -84,7 +84,7 @@ vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
- return vmsif(mask, op1, vl);
+ return __riscv_vmsif(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
@@ -93,7 +93,7 @@ vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
- return vmsif(mask, op1, vl);
+ return __riscv_vmsif(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
@@ -102,7 +102,7 @@ vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
- return vmsif(mask, op1, vl);
+ return __riscv_vmsif(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
@@ -111,7 +111,7 @@ vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
- return vmsif(mask, op1, vl);
+ return __riscv_vmsif(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
@@ -120,7 +120,7 @@ vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
- return vmsif(mask, op1, vl);
+ return __riscv_vmsif(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
@@ -129,6 +129,6 @@ vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
- return vmsif(mask, op1, vl);
+ return __riscv_vmsif(mask, op1, vl);
}
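
vmsif.c follows the same rename pattern for a mask-only operation, which takes a mask register rather than integer vectors. Again a hedged sketch outside the patch (the function name is invented; the semantics stated in the comment follow the RVV set-including-first instruction, not anything in this diff):

```c
#include <stddef.h>
#include <riscv_vector.h>

// Set-including-first: every lane up to and including the first active
// lane of src becomes 1, the rest 0. Same shape as test_vmsif_m_b8 above.
vbool8_t up_to_first(vbool8_t src, size_t vl) {
  return __riscv_vmsif(src, vl);  // formerly vmsif(src, vl)
}
```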
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsle.c
index 46be1f3fa2c2..ed2637d9c73e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsle.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsle(op1, op2, vl);
+ return __riscv_vmsle(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m(
@@ -409,7 +409,7 @@ vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m(
@@ -418,7 +418,7 @@ vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m(
@@ -427,7 +427,7 @@ vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m(
@@ -436,7 +436,7 @@ vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m(
@@ -445,7 +445,7 @@ vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m(
@@ -454,7 +454,7 @@ vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m(
@@ -463,7 +463,7 @@ vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m(
@@ -472,7 +472,7 @@ vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m(
@@ -481,7 +481,7 @@ vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m(
@@ -490,7 +490,7 @@ vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m(
@@ -499,7 +499,7 @@ vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m(
@@ -508,7 +508,7 @@ vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m(
@@ -517,7 +517,7 @@ vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m(
@@ -526,7 +526,7 @@ vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m(
@@ -535,7 +535,7 @@ vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m(
@@ -544,7 +544,7 @@ vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m(
@@ -553,7 +553,7 @@ vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m(
@@ -562,7 +562,7 @@ vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m(
@@ -571,7 +571,7 @@ vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m(
@@ -580,7 +580,7 @@ vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m(
@@ -589,7 +589,7 @@ vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m(
@@ -598,7 +598,7 @@ vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m(
@@ -607,7 +607,7 @@ vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m(
@@ -616,7 +616,7 @@ vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m(
@@ -625,7 +625,7 @@ vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m(
@@ -634,7 +634,7 @@ vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m(
@@ -643,7 +643,7 @@ vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m(
@@ -652,7 +652,7 @@ vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m(
@@ -661,7 +661,7 @@ vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m(
@@ -670,7 +670,7 @@ vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m(
@@ -679,7 +679,7 @@ vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m(
@@ -688,7 +688,7 @@ vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m(
@@ -697,7 +697,7 @@ vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m(
@@ -706,7 +706,7 @@ vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m(
@@ -715,7 +715,7 @@ vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m(
@@ -724,7 +724,7 @@ vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m(
@@ -733,7 +733,7 @@ vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m(
@@ -742,7 +742,7 @@ vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m(
@@ -751,7 +751,7 @@ vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m(
@@ -760,7 +760,7 @@ vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m(
@@ -769,7 +769,7 @@ vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m(
@@ -778,7 +778,7 @@ vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m(
@@ -787,7 +787,7 @@ vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m(
@@ -796,6 +796,6 @@ vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsle(mask, op1, op2, vl);
+ return __riscv_vmsle(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsleu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsleu.c
index 7107463ab48b..64970273bc96 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsleu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsleu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsleu(op1, op2, vl);
+ return __riscv_vmsleu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m(
@@ -409,7 +409,7 @@ vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m(
@@ -418,7 +418,7 @@ vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m(
@@ -427,7 +427,7 @@ vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m(
@@ -436,7 +436,7 @@ vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
@@ -445,7 +445,7 @@ vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
@@ -454,7 +454,7 @@ vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m(
@@ -463,7 +463,7 @@ vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m(
@@ -472,7 +472,7 @@ vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m(
@@ -481,7 +481,7 @@ vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m(
@@ -490,7 +490,7 @@ vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m(
@@ -499,7 +499,7 @@ vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m(
@@ -508,7 +508,7 @@ vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m(
@@ -517,7 +517,7 @@ vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m(
@@ -526,7 +526,7 @@ vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
@@ -535,7 +535,7 @@ vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
@@ -544,7 +544,7 @@ vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
@@ -553,7 +553,7 @@ vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
@@ -562,7 +562,7 @@ vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m(
@@ -571,7 +571,7 @@ vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m(
@@ -580,7 +580,7 @@ vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m(
@@ -589,7 +589,7 @@ vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m(
@@ -598,7 +598,7 @@ vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m(
@@ -607,7 +607,7 @@ vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m(
@@ -616,7 +616,7 @@ vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m(
@@ -625,7 +625,7 @@ vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m(
@@ -634,7 +634,7 @@ vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
@@ -643,7 +643,7 @@ vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
@@ -652,7 +652,7 @@ vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m(
@@ -661,7 +661,7 @@ vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m(
@@ -670,7 +670,7 @@ vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m(
@@ -679,7 +679,7 @@ vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m(
@@ -688,7 +688,7 @@ vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m(
@@ -697,7 +697,7 @@ vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m(
@@ -706,7 +706,7 @@ vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m(
@@ -715,7 +715,7 @@ vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m(
@@ -724,7 +724,7 @@ vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m(
@@ -733,7 +733,7 @@ vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m(
@@ -742,7 +742,7 @@ vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m(
@@ -751,7 +751,7 @@ vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m(
@@ -760,7 +760,7 @@ vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m(
@@ -769,7 +769,7 @@ vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m(
@@ -778,7 +778,7 @@ vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m(
@@ -787,7 +787,7 @@ vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m(
@@ -796,6 +796,6 @@ vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsleu(mask, op1, op2, vl);
+ return __riscv_vmsleu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmslt.c
index a89664ab6a63..2c04cebda681 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmslt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmslt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmslt(op1, op2, vl);
+ return __riscv_vmslt(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m(
@@ -409,7 +409,7 @@ vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m(
@@ -418,7 +418,7 @@ vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m(
@@ -427,7 +427,7 @@ vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m(
@@ -436,7 +436,7 @@ vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m(
@@ -445,7 +445,7 @@ vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m(
@@ -454,7 +454,7 @@ vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m(
@@ -463,7 +463,7 @@ vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m(
@@ -472,7 +472,7 @@ vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m(
@@ -481,7 +481,7 @@ vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m(
@@ -490,7 +490,7 @@ vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m(
@@ -499,7 +499,7 @@ vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m(
@@ -508,7 +508,7 @@ vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m(
@@ -517,7 +517,7 @@ vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m(
@@ -526,7 +526,7 @@ vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m(
@@ -535,7 +535,7 @@ vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m(
@@ -544,7 +544,7 @@ vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m(
@@ -553,7 +553,7 @@ vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m(
@@ -562,7 +562,7 @@ vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m(
@@ -571,7 +571,7 @@ vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m(
@@ -580,7 +580,7 @@ vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m(
@@ -589,7 +589,7 @@ vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m(
@@ -598,7 +598,7 @@ vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m(
@@ -607,7 +607,7 @@ vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_m(
@@ -616,7 +616,7 @@ vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m(
@@ -625,7 +625,7 @@ vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m(
@@ -634,7 +634,7 @@ vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m(
@@ -643,7 +643,7 @@ vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m(
@@ -652,7 +652,7 @@ vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m(
@@ -661,7 +661,7 @@ vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m(
@@ -670,7 +670,7 @@ vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m(
@@ -679,7 +679,7 @@ vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m(
@@ -688,7 +688,7 @@ vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m(
@@ -697,7 +697,7 @@ vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m(
@@ -706,7 +706,7 @@ vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m(
@@ -715,7 +715,7 @@ vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m(
@@ -724,7 +724,7 @@ vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m(
@@ -733,7 +733,7 @@ vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m(
@@ -742,7 +742,7 @@ vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m(
@@ -751,7 +751,7 @@ vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m(
@@ -760,7 +760,7 @@ vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m(
@@ -769,7 +769,7 @@ vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m(
@@ -778,7 +778,7 @@ vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m(
@@ -787,7 +787,7 @@ vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m(
@@ -796,6 +796,6 @@ vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmslt(mask, op1, op2, vl);
+ return __riscv_vmslt(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsltu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsltu.c
index 48a10b55bfda..8f00346c9a19 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsltu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsltu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsltu(op1, op2, vl);
+ return __riscv_vmsltu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m(
@@ -409,7 +409,7 @@ vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m(
@@ -418,7 +418,7 @@ vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m(
@@ -427,7 +427,7 @@ vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m(
@@ -436,7 +436,7 @@ vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m(
@@ -445,7 +445,7 @@ vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m(
@@ -454,7 +454,7 @@ vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m(
@@ -463,7 +463,7 @@ vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m(
@@ -472,7 +472,7 @@ vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m(
@@ -481,7 +481,7 @@ vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m(
@@ -490,7 +490,7 @@ vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m(
@@ -499,7 +499,7 @@ vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m(
@@ -508,7 +508,7 @@ vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m(
@@ -517,7 +517,7 @@ vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_m(
@@ -526,7 +526,7 @@ vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m(
@@ -535,7 +535,7 @@ vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m(
@@ -544,7 +544,7 @@ vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m(
@@ -553,7 +553,7 @@ vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m(
@@ -562,7 +562,7 @@ vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m(
@@ -571,7 +571,7 @@ vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m(
@@ -580,7 +580,7 @@ vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m(
@@ -589,7 +589,7 @@ vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m(
@@ -598,7 +598,7 @@ vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m(
@@ -607,7 +607,7 @@ vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m(
@@ -616,7 +616,7 @@ vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m(
@@ -625,7 +625,7 @@ vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m(
@@ -634,7 +634,7 @@ vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m(
@@ -643,7 +643,7 @@ vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m(
@@ -652,7 +652,7 @@ vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m(
@@ -661,7 +661,7 @@ vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m(
@@ -670,7 +670,7 @@ vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m(
@@ -679,7 +679,7 @@ vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m(
@@ -688,7 +688,7 @@ vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m(
@@ -697,7 +697,7 @@ vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m(
@@ -706,7 +706,7 @@ vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m(
@@ -715,7 +715,7 @@ vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m(
@@ -724,7 +724,7 @@ vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m(
@@ -733,7 +733,7 @@ vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m(
@@ -742,7 +742,7 @@ vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m(
@@ -751,7 +751,7 @@ vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m(
@@ -760,7 +760,7 @@ vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m(
@@ -769,7 +769,7 @@ vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m(
@@ -778,7 +778,7 @@ vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m(
@@ -787,7 +787,7 @@ vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m(
@@ -796,6 +796,6 @@ vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsltu(mask, op1, op2, vl);
+ return __riscv_vmsltu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsne.c
index 6955c818b011..e94974789893 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsne.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64(
@@ -22,7 +22,7 @@ vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32(
@@ -31,7 +31,7 @@ vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32(
@@ -40,7 +40,7 @@ vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16(
@@ -49,7 +49,7 @@ vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16(
@@ -58,7 +58,7 @@ vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8(
@@ -67,7 +67,7 @@ vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8(
@@ -76,7 +76,7 @@ vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4(
@@ -85,7 +85,7 @@ vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4(
@@ -94,7 +94,7 @@ vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2(
@@ -103,7 +103,7 @@ vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2(
@@ -112,7 +112,7 @@ vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1(
@@ -121,7 +121,7 @@ vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1(
@@ -130,7 +130,7 @@ vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64(
@@ -139,7 +139,7 @@ vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64(
@@ -148,7 +148,7 @@ vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32(
@@ -157,7 +157,7 @@ vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32(
@@ -166,7 +166,7 @@ vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16(
@@ -175,7 +175,7 @@ vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16(
@@ -184,7 +184,7 @@ vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8(
@@ -193,7 +193,7 @@ vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8(
@@ -202,7 +202,7 @@ vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4(
@@ -211,7 +211,7 @@ vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4(
@@ -220,7 +220,7 @@ vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2(
@@ -229,7 +229,7 @@ vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2(
@@ -238,7 +238,7 @@ vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64(
@@ -247,7 +247,7 @@ vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64(
@@ -256,7 +256,7 @@ vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32(
@@ -265,7 +265,7 @@ vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32(
@@ -274,7 +274,7 @@ vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16(
@@ -283,7 +283,7 @@ vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16(
@@ -292,7 +292,7 @@ vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8(
@@ -301,7 +301,7 @@ vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8(
@@ -310,7 +310,7 @@ vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4(
@@ -319,7 +319,7 @@ vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4(
@@ -328,7 +328,7 @@ vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64(
@@ -337,7 +337,7 @@ vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64(
@@ -346,7 +346,7 @@ vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32(
@@ -355,7 +355,7 @@ vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32(
@@ -364,7 +364,7 @@ vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16(
@@ -373,7 +373,7 @@ vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16(
@@ -382,7 +382,7 @@ vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8(
@@ -391,7 +391,7 @@ vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8(
@@ -400,7 +400,7 @@ vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64(
@@ -409,7 +409,7 @@ vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64(
@@ -418,7 +418,7 @@ vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32(
@@ -427,7 +427,7 @@ vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32(
@@ -436,7 +436,7 @@ vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16(
@@ -445,7 +445,7 @@ vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16(
@@ -454,7 +454,7 @@ vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8(
@@ -463,7 +463,7 @@ vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8(
@@ -472,7 +472,7 @@ vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4(
@@ -481,7 +481,7 @@ vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4(
@@ -490,7 +490,7 @@ vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2(
@@ -499,7 +499,7 @@ vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2(
@@ -508,7 +508,7 @@ vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1(
@@ -517,7 +517,7 @@ vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1(
@@ -526,7 +526,7 @@ vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64(
@@ -535,7 +535,7 @@ vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64(
@@ -544,7 +544,7 @@ vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32(
@@ -553,7 +553,7 @@ vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32(
@@ -562,7 +562,7 @@ vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16(
@@ -571,7 +571,7 @@ vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16(
@@ -580,7 +580,7 @@ vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8(
@@ -589,7 +589,7 @@ vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8(
@@ -598,7 +598,7 @@ vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4(
@@ -607,7 +607,7 @@ vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4(
@@ -616,7 +616,7 @@ vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2(
@@ -625,7 +625,7 @@ vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2(
@@ -634,7 +634,7 @@ vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64(
@@ -643,7 +643,7 @@ vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64(
@@ -652,7 +652,7 @@ vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32(
@@ -661,7 +661,7 @@ vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32(
@@ -670,7 +670,7 @@ vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16(
@@ -679,7 +679,7 @@ vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16(
@@ -688,7 +688,7 @@ vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8(
@@ -697,7 +697,7 @@ vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8(
@@ -706,7 +706,7 @@ vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4(
@@ -715,7 +715,7 @@ vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4(
@@ -724,7 +724,7 @@ vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64(
@@ -733,7 +733,7 @@ vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64(
@@ -742,7 +742,7 @@ vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32(
@@ -751,7 +751,7 @@ vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32(
@@ -760,7 +760,7 @@ vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16(
@@ -769,7 +769,7 @@ vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16(
@@ -778,7 +778,7 @@ vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8(
@@ -787,7 +787,7 @@ vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8(
@@ -796,7 +796,7 @@ vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsne(op1, op2, vl);
+ return __riscv_vmsne(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m(
@@ -805,7 +805,7 @@ vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m(
@@ -814,7 +814,7 @@ vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m(
@@ -823,7 +823,7 @@ vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_m(
@@ -832,7 +832,7 @@ vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m(
@@ -841,7 +841,7 @@ vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m(
@@ -850,7 +850,7 @@ vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m(
@@ -859,7 +859,7 @@ vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m(
@@ -868,7 +868,7 @@ vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m(
@@ -877,7 +877,7 @@ vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m(
@@ -886,7 +886,7 @@ vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m(
@@ -895,7 +895,7 @@ vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m(
@@ -904,7 +904,7 @@ vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m(
@@ -913,7 +913,7 @@ vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m(
@@ -922,7 +922,7 @@ vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m(
@@ -931,7 +931,7 @@ vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m(
@@ -940,7 +940,7 @@ vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m(
@@ -949,7 +949,7 @@ vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_m(
@@ -958,7 +958,7 @@ vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m(
@@ -967,7 +967,7 @@ vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m(
@@ -976,7 +976,7 @@ vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m(
@@ -985,7 +985,7 @@ vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m(
@@ -994,7 +994,7 @@ vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m(
@@ -1003,7 +1003,7 @@ vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m(
@@ -1012,7 +1012,7 @@ vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_m(
@@ -1021,7 +1021,7 @@ vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m(
@@ -1030,7 +1030,7 @@ vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m(
@@ -1039,7 +1039,7 @@ vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m(
@@ -1048,7 +1048,7 @@ vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m(
@@ -1057,7 +1057,7 @@ vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m(
@@ -1066,7 +1066,7 @@ vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m(
@@ -1075,7 +1075,7 @@ vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m(
@@ -1084,7 +1084,7 @@ vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m(
@@ -1093,7 +1093,7 @@ vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m(
@@ -1102,7 +1102,7 @@ vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m(
@@ -1111,7 +1111,7 @@ vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m(
@@ -1120,7 +1120,7 @@ vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m(
@@ -1129,7 +1129,7 @@ vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m(
@@ -1138,7 +1138,7 @@ vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m(
@@ -1147,7 +1147,7 @@ vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m(
@@ -1156,7 +1156,7 @@ vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m(
@@ -1165,7 +1165,7 @@ vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m(
@@ -1174,7 +1174,7 @@ vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m(
@@ -1183,7 +1183,7 @@ vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m(
@@ -1192,7 +1192,7 @@ vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m(
@@ -1201,7 +1201,7 @@ vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m(
@@ -1210,7 +1210,7 @@ vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m(
@@ -1219,7 +1219,7 @@ vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m(
@@ -1228,7 +1228,7 @@ vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m(
@@ -1237,7 +1237,7 @@ vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m(
@@ -1246,7 +1246,7 @@ vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m(
@@ -1255,7 +1255,7 @@ vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m(
@@ -1264,7 +1264,7 @@ vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m(
@@ -1273,7 +1273,7 @@ vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m(
@@ -1282,7 +1282,7 @@ vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m(
@@ -1291,7 +1291,7 @@ vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m(
@@ -1300,7 +1300,7 @@ vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m(
@@ -1309,7 +1309,7 @@ vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m(
@@ -1318,7 +1318,7 @@ vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m(
@@ -1327,7 +1327,7 @@ vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m(
@@ -1336,7 +1336,7 @@ vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m(
@@ -1345,7 +1345,7 @@ vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m(
@@ -1354,7 +1354,7 @@ vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_m(
@@ -1363,7 +1363,7 @@ vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m(
@@ -1372,7 +1372,7 @@ vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m(
@@ -1381,7 +1381,7 @@ vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m(
@@ -1390,7 +1390,7 @@ vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m(
@@ -1399,7 +1399,7 @@ vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m(
@@ -1408,7 +1408,7 @@ vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m(
@@ -1417,7 +1417,7 @@ vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m(
@@ -1426,7 +1426,7 @@ vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m(
@@ -1435,7 +1435,7 @@ vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m(
@@ -1444,7 +1444,7 @@ vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m(
@@ -1453,7 +1453,7 @@ vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m(
@@ -1462,7 +1462,7 @@ vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m(
@@ -1471,7 +1471,7 @@ vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m(
@@ -1480,7 +1480,7 @@ vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m(
@@ -1489,7 +1489,7 @@ vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m(
@@ -1498,7 +1498,7 @@ vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m(
@@ -1507,7 +1507,7 @@ vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m(
@@ -1516,7 +1516,7 @@ vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m(
@@ -1525,7 +1525,7 @@ vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m(
@@ -1534,7 +1534,7 @@ vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m(
@@ -1543,7 +1543,7 @@ vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m(
@@ -1552,7 +1552,7 @@ vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m(
@@ -1561,7 +1561,7 @@ vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m(
@@ -1570,7 +1570,7 @@ vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m(
@@ -1579,7 +1579,7 @@ vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m(
@@ -1588,6 +1588,6 @@ vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsne(mask, op1, op2, vl);
+ return __riscv_vmsne(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsof.c
index 37be28e9e3a5..1f60490e9e7a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsof.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) {
- return vmsof(op1, vl);
+ return __riscv_vmsof(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) {
- return vmsof(op1, vl);
+ return __riscv_vmsof(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) {
- return vmsof(op1, vl);
+ return __riscv_vmsof(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) {
- return vmsof(op1, vl);
+ return __riscv_vmsof(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) {
- return vmsof(op1, vl);
+ return __riscv_vmsof(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) {
- return vmsof(op1, vl);
+ return __riscv_vmsof(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b64(
@@ -66,7 +66,7 @@ vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) {
- return vmsof(op1, vl);
+ return __riscv_vmsof(op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b1_m(
@@ -75,7 +75,7 @@ vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
- return vmsof(mask, op1, vl);
+ return __riscv_vmsof(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b2_m(
@@ -84,7 +84,7 @@ vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
- return vmsof(mask, op1, vl);
+ return __riscv_vmsof(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b4_m(
@@ -93,7 +93,7 @@ vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
- return vmsof(mask, op1, vl);
+ return __riscv_vmsof(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b8_m(
@@ -102,7 +102,7 @@ vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
- return vmsof(mask, op1, vl);
+ return __riscv_vmsof(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b16_m(
@@ -111,7 +111,7 @@ vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
- return vmsof(mask, op1, vl);
+ return __riscv_vmsof(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b32_m(
@@ -120,7 +120,7 @@ vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
- return vmsof(mask, op1, vl);
+ return __riscv_vmsof(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b64_m(
@@ -129,6 +129,6 @@ vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
- return vmsof(mask, op1, vl);
+ return __riscv_vmsof(mask, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmul.c
index 0973aa49b6b3..432d16715965 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmul.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
@@ -408,7 +408,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
@@ -471,7 +471,7 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
@@ -480,7 +480,7 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
@@ -489,7 +489,7 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
@@ -498,7 +498,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
@@ -507,7 +507,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
@@ -516,7 +516,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
@@ -525,7 +525,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
@@ -534,7 +534,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
@@ -579,7 +579,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
@@ -588,7 +588,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
@@ -597,7 +597,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
@@ -606,7 +606,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
@@ -615,7 +615,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
@@ -624,7 +624,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
@@ -633,7 +633,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
@@ -642,7 +642,7 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
@@ -669,7 +669,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
@@ -678,7 +678,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
@@ -687,7 +687,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
@@ -696,7 +696,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
@@ -705,7 +705,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
@@ -714,7 +714,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
@@ -723,7 +723,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
@@ -732,7 +732,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
@@ -741,7 +741,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
@@ -750,7 +750,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
@@ -759,7 +759,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
@@ -768,7 +768,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
@@ -777,7 +777,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
@@ -786,7 +786,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
@@ -795,7 +795,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmul(op1, op2, vl);
+ return __riscv_vmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
@@ -804,7 +804,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m(
@@ -813,7 +813,7 @@ vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m(
@@ -822,7 +822,7 @@ vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m(
@@ -831,7 +831,7 @@ vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m(
@@ -840,7 +840,7 @@ vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m(
@@ -849,7 +849,7 @@ vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m(
@@ -858,7 +858,7 @@ vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m(
@@ -867,7 +867,7 @@ vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m(
@@ -876,7 +876,7 @@ vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m(
@@ -885,7 +885,7 @@ vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m(
@@ -894,7 +894,7 @@ vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m(
@@ -903,7 +903,7 @@ vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m(
@@ -912,7 +912,7 @@ vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m(
@@ -921,7 +921,7 @@ vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m(
@@ -930,7 +930,7 @@ vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m(
@@ -939,7 +939,7 @@ vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m(
@@ -948,7 +948,7 @@ vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m(
@@ -957,7 +957,7 @@ vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
@@ -966,7 +966,7 @@ vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
@@ -975,7 +975,7 @@ vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
@@ -984,7 +984,7 @@ vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
@@ -993,7 +993,7 @@ vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmul(mask, op1, op2, vl);
+ return __riscv_vmul(mask, op1, op2, vl);
}
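The hunks above and below all follow one mechanical pattern: each call to an overloaded RVV intrinsic gains the `__riscv_` prefix while its argument list is unchanged, and the overload is still resolved from the argument types. As a minimal usage sketch (not part of the patch; the helper name is illustrative), code built against the renamed API, compiled with a vector-enabled target such as -march=rv64gcv, looks like this:

#include <riscv_vector.h>

// Masked vector-scalar multiply: one spelling, __riscv_vmul, covers every
// element width and LMUL; vbool32_t pairs with vint32m1_t (SEW/LMUL = 32).
vint32m1_t scale_masked(vbool32_t mask, vint32m1_t v, int32_t s, size_t vl) {
  return __riscv_vmul(mask, v, s, vl);
}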
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulh.c
index 8bcd378978f7..94d944eb214d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulh.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulh.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
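The next file exercises `vmulhsu`, which differs from `vmulh` only in operand signedness: `vmulh` returns the upper half of a signed-by-signed product, while `vmulhsu` multiplies a signed `op1` by an unsigned `op2`. A short sketch of both under the new prefix (the helper name is illustrative):

#include <riscv_vector.h>

vint32m1_t high_halves(vint32m1_t a, vint32m1_t b, vuint32m1_t c, size_t vl) {
  vint32m1_t ss = __riscv_vmulh(a, b, vl);  // signed * signed, upper SEW bits
  return __riscv_vmulhsu(ss, c, vl);        // signed * unsigned, upper SEW bits
}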
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhsu.c
index fcb577154de4..6a3905ad16c3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhsu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhsu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhu.c
index 35c82121b055..efddc013a42d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c
index 0bddd22f207d..26ce797588a4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4(
@@ -22,7 +22,7 @@ vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2(
@@ -31,7 +31,7 @@ vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m1(
@@ -40,7 +40,7 @@ vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m2(
@@ -49,7 +49,7 @@ vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m4(
@@ -58,7 +58,7 @@ vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m8(
@@ -67,7 +67,7 @@ vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4(
@@ -76,7 +76,7 @@ vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2(
@@ -85,7 +85,7 @@ vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m1(
@@ -94,7 +94,7 @@ vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m2(
@@ -103,7 +103,7 @@ vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m4(
@@ -112,7 +112,7 @@ vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m8(
@@ -121,7 +121,7 @@ vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2(
@@ -130,7 +130,7 @@ vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m1(
@@ -139,7 +139,7 @@ vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m2(
@@ -148,7 +148,7 @@ vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m4(
@@ -157,7 +157,7 @@ vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m8(
@@ -166,7 +166,7 @@ vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m1(
@@ -175,7 +175,7 @@ vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m2(
@@ -184,7 +184,7 @@ vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m4(
@@ -193,7 +193,7 @@ vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m8(
@@ -202,7 +202,7 @@ vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8(
@@ -211,7 +211,7 @@ vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4(
@@ -220,7 +220,7 @@ vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2(
@@ -229,7 +229,7 @@ vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m1(
@@ -238,7 +238,7 @@ vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m2(
@@ -247,7 +247,7 @@ vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m4(
@@ -256,7 +256,7 @@ vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m8(
@@ -265,7 +265,7 @@ vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4(
@@ -274,7 +274,7 @@ vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2(
@@ -283,7 +283,7 @@ vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m1(
@@ -292,7 +292,7 @@ vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m2(
@@ -301,7 +301,7 @@ vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m4(
@@ -310,7 +310,7 @@ vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m8(
@@ -319,7 +319,7 @@ vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2(
@@ -328,7 +328,7 @@ vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m1(
@@ -337,7 +337,7 @@ vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m2(
@@ -346,7 +346,7 @@ vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m4(
@@ -355,7 +355,7 @@ vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m8(
@@ -364,7 +364,7 @@ vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m1(
@@ -373,7 +373,7 @@ vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m2(
@@ -382,7 +382,7 @@ vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m4(
@@ -391,7 +391,7 @@ vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m8(
@@ -400,7 +400,7 @@ vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4(
@@ -409,7 +409,7 @@ vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vmv_v_v_f16mf4(vfloat16mf4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2(
@@ -418,7 +418,7 @@ vfloat16mf4_t test_vmv_v_v_f16mf4(vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vmv_v_v_f16mf2(vfloat16mf2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1(
@@ -427,7 +427,7 @@ vfloat16mf2_t test_vmv_v_v_f16mf2(vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2(
@@ -436,7 +436,7 @@ vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vmv_v_v_f16m2(vfloat16m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4(
@@ -445,7 +445,7 @@ vfloat16m2_t test_vmv_v_v_f16m2(vfloat16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vmv_v_v_f16m4(vfloat16m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8(
@@ -454,7 +454,7 @@ vfloat16m4_t test_vmv_v_v_f16m4(vfloat16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vmv_v_v_f16m8(vfloat16m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2(
@@ -463,7 +463,7 @@ vfloat16m8_t test_vmv_v_v_f16m8(vfloat16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m1(
@@ -472,7 +472,7 @@ vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m2(
@@ -481,7 +481,7 @@ vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m4(
@@ -490,7 +490,7 @@ vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m8(
@@ -499,7 +499,7 @@ vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m1(
@@ -508,7 +508,7 @@ vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m2(
@@ -517,7 +517,7 @@ vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m4(
@@ -526,7 +526,7 @@ vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m8(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
- return vmv_v(src, vl);
+ return __riscv_vmv_v(src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i8mf8_i8(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8(
@@ -553,7 +553,7 @@ int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8(
@@ -562,7 +562,7 @@ int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8(
@@ -571,7 +571,7 @@ int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8(
@@ -580,7 +580,7 @@ int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8(
@@ -589,7 +589,7 @@ int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8(
@@ -598,7 +598,7 @@ int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16(
@@ -607,7 +607,7 @@ int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16(
@@ -616,7 +616,7 @@ int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m1_i16(
@@ -625,7 +625,7 @@ int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16(
@@ -634,7 +634,7 @@ int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16(
@@ -643,7 +643,7 @@ int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16(
@@ -652,7 +652,7 @@ int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32(
@@ -661,7 +661,7 @@ int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32(
@@ -670,7 +670,7 @@ int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32(
@@ -679,7 +679,7 @@ int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32(
@@ -688,7 +688,7 @@ int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32(
@@ -697,7 +697,7 @@ int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64(
@@ -706,7 +706,7 @@ int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64(
@@ -715,7 +715,7 @@ int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64(
@@ -724,7 +724,7 @@ int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64(
@@ -733,7 +733,7 @@ int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8(
@@ -742,7 +742,7 @@ int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8(
@@ -751,7 +751,7 @@ uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8(
@@ -760,7 +760,7 @@ uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8(
@@ -769,7 +769,7 @@ uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8(
@@ -778,7 +778,7 @@ uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8(
@@ -787,7 +787,7 @@ uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8(
@@ -796,7 +796,7 @@ uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) {
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16(
@@ -805,7 +805,7 @@ uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16(
@@ -814,7 +814,7 @@ uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16(
@@ -823,7 +823,7 @@ uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16(
@@ -832,7 +832,7 @@ uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u16m4_u16(
@@ -841,7 +841,7 @@ uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16(
@@ -850,7 +850,7 @@ uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) {
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32(
@@ -859,7 +859,7 @@ uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32(
@@ -868,7 +868,7 @@ uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32(
@@ -877,7 +877,7 @@ uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32(
@@ -886,7 +886,7 @@ uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32(
@@ -895,7 +895,7 @@ uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) {
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64(
@@ -904,7 +904,7 @@ uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64(
@@ -913,7 +913,7 @@ uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64(
@@ -922,7 +922,7 @@ uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
// CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64(
@@ -931,6 +931,6 @@ uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) {
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) {
- return vmv_x(src);
+ return __riscv_vmv_x(src);
}
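(Reader's note, not part of the patch: `vmv.c` renames two distinct overloads, `__riscv_vmv_v`, the whole-vector move that takes `vl`, and `__riscv_vmv_x`, the scalar read of element 0 that takes none. A short sketch combining both, matching the signatures exercised above; the helper name `first_element` is hypothetical.)

#include <riscv_vector.h>

// Copy a vector value, then extract element 0 as a scalar.
int32_t first_element(vint32m1_t v, size_t vl) {
  vint32m1_t c = __riscv_vmv_v(v, vl); // vector-vector move
  return __riscv_vmv_x(c);             // element 0 -> int32_t
}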
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxnor.c
index 8be2c6cf8f5b..79ce870b534e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxnor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxnor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmxnor(op1, op2, vl);
+ return __riscv_vmxnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxnor_mm_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmxnor(op1, op2, vl);
+ return __riscv_vmxnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxnor_mm_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmxnor(op1, op2, vl);
+ return __riscv_vmxnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxnor_mm_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmxnor(op1, op2, vl);
+ return __riscv_vmxnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxnor_mm_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmxnor(op1, op2, vl);
+ return __riscv_vmxnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxnor_mm_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmxnor(op1, op2, vl);
+ return __riscv_vmxnor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxnor_mm_b64(
@@ -66,6 +66,6 @@ vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmxnor(op1, op2, vl);
+ return __riscv_vmxnor(op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxor.c
index b37dee45effc..65fa529088d0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmxor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmxor(op1, op2, vl);
+ return __riscv_vmxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxor_mm_b2(
@@ -21,7 +21,7 @@ vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmxor(op1, op2, vl);
+ return __riscv_vmxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxor_mm_b4(
@@ -30,7 +30,7 @@ vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmxor(op1, op2, vl);
+ return __riscv_vmxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxor_mm_b8(
@@ -39,7 +39,7 @@ vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmxor(op1, op2, vl);
+ return __riscv_vmxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxor_mm_b16(
@@ -48,7 +48,7 @@ vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmxor(op1, op2, vl);
+ return __riscv_vmxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxor_mm_b32(
@@ -57,7 +57,7 @@ vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmxor(op1, op2, vl);
+ return __riscv_vmxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmxor_mm_b64(
@@ -66,6 +66,6 @@ vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmxor(op1, op2, vl);
+ return __riscv_vmxor(op1, op2, vl);
}
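(Reader's note, not part of the patch: the mask-logical tests `vmxnor.c` and `vmxor.c` follow the same rename pattern, with the overload dispatching on the `vboolN_t` operand type. A sketch for the ratio-8 mask type; both helper names are hypothetical.)

#include <riscv_vector.h>

// Element-wise XOR and XNOR over predicate masks.
vbool8_t mask_xor(vbool8_t a, vbool8_t b, size_t vl) {
  return __riscv_vmxor(a, b, vl);   // a ^ b
}
vbool8_t mask_xnor(vbool8_t a, vbool8_t b, size_t vl) {
  return __riscv_vmxnor(a, b, vl);  // ~(a ^ b)
}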
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c
index bcdb352a4b04..b54000aeee94 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
@@ -120,7 +120,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
@@ -129,7 +129,7 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
@@ -138,7 +138,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
@@ -147,7 +147,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
@@ -156,7 +156,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
@@ -165,7 +165,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
@@ -174,7 +174,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
@@ -183,7 +183,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
@@ -192,7 +192,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
@@ -201,7 +201,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
@@ -210,7 +210,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
@@ -219,7 +219,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
@@ -228,7 +228,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
@@ -237,7 +237,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
@@ -246,7 +246,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
@@ -255,7 +255,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
@@ -264,7 +264,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
@@ -273,7 +273,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
- return vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m(
@@ -282,7 +282,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m(
@@ -291,7 +291,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m(
@@ -300,7 +300,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m(
@@ -309,7 +309,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m(
@@ -318,7 +318,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m(
@@ -327,7 +327,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m(
@@ -336,7 +336,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m(
@@ -345,7 +345,7 @@ vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m(
@@ -354,7 +354,7 @@ vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m(
@@ -363,7 +363,7 @@ vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m(
@@ -372,7 +372,7 @@ vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m(
@@ -381,7 +381,7 @@ vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m(
@@ -390,7 +390,7 @@ vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m(
@@ -399,7 +399,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m(
@@ -408,7 +408,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m(
@@ -417,7 +417,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m(
@@ -426,7 +426,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m(
@@ -435,7 +435,7 @@ vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m(
@@ -444,7 +444,7 @@ vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m(
@@ -453,7 +453,7 @@ vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m(
@@ -462,7 +462,7 @@ vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m(
@@ -471,7 +471,7 @@ vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m(
@@ -480,7 +480,7 @@ vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m(
@@ -489,7 +489,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m(
@@ -498,7 +498,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m(
@@ -507,7 +507,7 @@ vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m(
@@ -516,7 +516,7 @@ vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m(
@@ -525,7 +525,7 @@ vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m(
@@ -534,7 +534,7 @@ vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m(
@@ -543,6 +543,6 @@ vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, vl);
}
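(Reader's note, not part of the patch: `vnclip` is the signed saturating narrow, and the renamed overload still selects the wv form for a vector shift or the wx form for a scalar shift from the second argument's type. A sketch assuming the three-argument signature shown in these tests; `narrow_sat` is a hypothetical name.)

#include <riscv_vector.h>

// Shift 16-bit lanes right by `shift`, round, and saturate into 8 bits.
vint8m1_t narrow_sat(vint16m2_t wide, size_t shift, size_t vl) {
  return __riscv_vnclip(wide, shift, vl); // wx form: scalar shift amount
}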
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c
index dc5160350022..3566b080d0a6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
@@ -120,7 +120,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
@@ -129,7 +129,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
@@ -138,7 +138,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
@@ -147,7 +147,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
@@ -156,7 +156,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
@@ -165,7 +165,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
@@ -174,7 +174,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
@@ -183,7 +183,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
@@ -192,7 +192,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
@@ -201,7 +201,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
@@ -210,7 +210,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
@@ -219,7 +219,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
@@ -228,7 +228,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
@@ -237,7 +237,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
@@ -246,7 +246,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
@@ -255,7 +255,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
@@ -264,7 +264,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
@@ -273,7 +273,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
- return vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m(
@@ -282,7 +282,7 @@ vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m(
@@ -291,7 +291,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m(
@@ -300,7 +300,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m(
@@ -309,7 +309,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m(
@@ -318,7 +318,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m(
@@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m(
@@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m(
@@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m(
@@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m(
@@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m(
@@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m(
@@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m(
@@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m(
@@ -399,7 +399,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m(
@@ -408,7 +408,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m(
@@ -417,7 +417,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m(
@@ -426,7 +426,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m(
@@ -435,7 +435,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m(
@@ -444,7 +444,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m(
@@ -453,7 +453,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m(
@@ -462,7 +462,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m(
@@ -471,7 +471,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m(
@@ -480,7 +480,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m(
@@ -489,7 +489,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m(
@@ -498,7 +498,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m(
@@ -507,7 +507,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m(
@@ -516,7 +516,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m(
@@ -525,7 +525,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m(
@@ -534,7 +534,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m(
@@ -543,6 +543,6 @@ vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vncvt.c
index 36e63b9ef2fb..3a7c24d1956e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vncvt.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8(vint16mf4_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4(
@@ -21,7 +21,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8(vint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vncvt_x_x_w_i8mf4(vint16mf2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2(
@@ -30,7 +30,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4(vint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vncvt_x_x_w_i8mf2(vint16m1_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1(
@@ -39,7 +39,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2(vint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vncvt_x_x_w_i8m1(vint16m2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2(
@@ -48,7 +48,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1(vint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vncvt_x_x_w_i8m2(vint16m4_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4(
@@ -57,7 +57,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2(vint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vncvt_x_x_w_i8m4(vint16m8_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8(
@@ -66,7 +66,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4(vint16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vncvt_x_x_w_u8mf8(vuint16mf4_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4(
@@ -75,7 +75,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8(vuint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vncvt_x_x_w_u8mf4(vuint16mf2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2(
@@ -84,7 +84,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4(vuint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vncvt_x_x_w_u8mf2(vuint16m1_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1(
@@ -93,7 +93,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2(vuint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vncvt_x_x_w_u8m1(vuint16m2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2(
@@ -102,7 +102,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1(vuint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vncvt_x_x_w_u8m2(vuint16m4_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4(
@@ -111,7 +111,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2(vuint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vncvt_x_x_w_u8m4(vuint16m8_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4(
@@ -120,7 +120,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4(vuint16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4(vint32mf2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2(
@@ -129,7 +129,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4(vint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vncvt_x_x_w_i16mf2(vint32m1_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1(
@@ -138,7 +138,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2(vint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vncvt_x_x_w_i16m1(vint32m2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2(
@@ -147,7 +147,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1(vint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vncvt_x_x_w_i16m2(vint32m4_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4(
@@ -156,7 +156,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2(vint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4(vint32m8_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4(
@@ -165,7 +165,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4(vint32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4(vuint32mf2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2(
@@ -174,7 +174,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2(vuint32m1_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1(
@@ -183,7 +183,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2(vuint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1(vuint32m2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1(vuint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2(vuint32m4_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4(
@@ -201,7 +201,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2(vuint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4(vuint32m8_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2(
@@ -210,7 +210,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4(vuint32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2(vint64m1_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1(
@@ -219,7 +219,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2(vint64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1(vint64m2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2(
@@ -228,7 +228,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1(vint64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2(vint64m4_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4(
@@ -237,7 +237,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2(vint64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4(vint64m8_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2(
@@ -246,7 +246,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4(vint64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2(vuint64m1_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2(vuint64m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1(vuint64m2_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2(
@@ -264,7 +264,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1(vuint64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2(vuint64m4_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4(
@@ -273,7 +273,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2(vuint64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4(vuint64m8_t src, size_t vl) {
- return vncvt_x(src, vl);
+ return __riscv_vncvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_m(
@@ -282,7 +282,7 @@ vuint32m4_t test_vncvt_x_x_w_u32m4(vuint64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint16mf4_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_m(
@@ -291,7 +291,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint16mf4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint16mf2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_m(
@@ -300,7 +300,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint16mf2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint16m1_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_m(
@@ -309,7 +309,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint16m2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_m(
@@ -318,7 +318,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint16m4_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_m(
@@ -327,7 +327,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint16m8_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_m(
@@ -336,7 +336,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint16mf4_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_m(
@@ -345,7 +345,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint16mf4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint16mf2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_m(
@@ -354,7 +354,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint16mf2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint16m1_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_m(
@@ -363,7 +363,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint16m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint16m2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_m(
@@ -372,7 +372,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint16m4_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_m(
@@ -381,7 +381,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint16m8_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_m(
@@ -390,7 +390,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint16m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_m(
@@ -399,7 +399,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_m(
@@ -408,7 +408,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_m(
@@ -417,7 +417,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_m(
@@ -426,7 +426,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_m(
@@ -435,7 +435,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_m(
@@ -444,7 +444,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_m(
@@ -453,7 +453,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint32m1_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_m(
@@ -462,7 +462,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_m(
@@ -471,7 +471,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_m(
@@ -480,7 +480,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_m(
@@ -489,7 +489,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_m(
@@ -498,7 +498,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_m(
@@ -507,7 +507,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_m(
@@ -516,7 +516,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_m(
@@ -525,7 +525,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint64m1_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_m(
@@ -534,7 +534,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_m(
@@ -543,6 +543,6 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) {
- return vncvt_x(mask, src, vl);
+ return __riscv_vncvt_x(mask, src, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vneg.c
index 211ee726278a..e9d4d50d0795 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vneg.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8(vint8mf8_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
@@ -22,7 +22,7 @@ vint8mf8_t test_vneg_v_i8mf8(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4(vint8mf4_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
@@ -31,7 +31,7 @@ vint8mf4_t test_vneg_v_i8mf4(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2(vint8mf2_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m1(
@@ -40,7 +40,7 @@ vint8mf2_t test_vneg_v_i8mf2(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1(vint8m1_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m2(
@@ -49,7 +49,7 @@ vint8m1_t test_vneg_v_i8m1(vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2(vint8m2_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m4(
@@ -58,7 +58,7 @@ vint8m2_t test_vneg_v_i8m2(vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4(vint8m4_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m8(
@@ -67,7 +67,7 @@ vint8m4_t test_vneg_v_i8m4(vint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8(vint8m8_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
@@ -76,7 +76,7 @@ vint8m8_t test_vneg_v_i8m8(vint8m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4(vint16mf4_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
@@ -85,7 +85,7 @@ vint16mf4_t test_vneg_v_i16mf4(vint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2(vint16mf2_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m1(
@@ -94,7 +94,7 @@ vint16mf2_t test_vneg_v_i16mf2(vint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1(vint16m1_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m2(
@@ -103,7 +103,7 @@ vint16m1_t test_vneg_v_i16m1(vint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2(vint16m2_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m4(
@@ -112,7 +112,7 @@ vint16m2_t test_vneg_v_i16m2(vint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4(vint16m4_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m8(
@@ -121,7 +121,7 @@ vint16m4_t test_vneg_v_i16m4(vint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8(vint16m8_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
@@ -130,7 +130,7 @@ vint16m8_t test_vneg_v_i16m8(vint16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2(vint32mf2_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m1(
@@ -139,7 +139,7 @@ vint32mf2_t test_vneg_v_i32mf2(vint32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1(vint32m1_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m2(
@@ -148,7 +148,7 @@ vint32m1_t test_vneg_v_i32m1(vint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2(vint32m2_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m4(
@@ -157,7 +157,7 @@ vint32m2_t test_vneg_v_i32m2(vint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4(vint32m4_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m8(
@@ -166,7 +166,7 @@ vint32m4_t test_vneg_v_i32m4(vint32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8(vint32m8_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m1(
@@ -175,7 +175,7 @@ vint32m8_t test_vneg_v_i32m8(vint32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1(vint64m1_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m2(
@@ -184,7 +184,7 @@ vint64m1_t test_vneg_v_i64m1(vint64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2(vint64m2_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m4(
@@ -193,7 +193,7 @@ vint64m2_t test_vneg_v_i64m2(vint64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4(vint64m4_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
@@ -202,7 +202,7 @@ vint64m4_t test_vneg_v_i64m4(vint64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8(vint64m8_t op1, size_t vl) {
- return vneg(op1, vl);
+ return __riscv_vneg(op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8_m(
@@ -211,7 +211,7 @@ vint64m8_t test_vneg_v_i64m8(vint64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4_m(
@@ -220,7 +220,7 @@ vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2_m(
@@ -229,7 +229,7 @@ vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m1_m(
@@ -238,7 +238,7 @@ vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m2_m(
@@ -247,7 +247,7 @@ vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m4_m(
@@ -256,7 +256,7 @@ vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m8_m(
@@ -265,7 +265,7 @@ vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_m(
@@ -274,7 +274,7 @@ vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2_m(
@@ -283,7 +283,7 @@ vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m1_m(
@@ -292,7 +292,7 @@ vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m2_m(
@@ -301,7 +301,7 @@ vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m4_m(
@@ -310,7 +310,7 @@ vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m8_m(
@@ -319,7 +319,7 @@ vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_m(
@@ -328,7 +328,7 @@ vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m1_m(
@@ -337,7 +337,7 @@ vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m2_m(
@@ -346,7 +346,7 @@ vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m4_m(
@@ -355,7 +355,7 @@ vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m8_m(
@@ -364,7 +364,7 @@ vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m1_m(
@@ -373,7 +373,7 @@ vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m2_m(
@@ -382,7 +382,7 @@ vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m4_m(
@@ -391,7 +391,7 @@ vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m8_m(
@@ -400,6 +400,6 @@ vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) {
- return vneg(mask, op1, vl);
+ return __riscv_vneg(mask, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c
index a3456823327e..6713945570c2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
@@ -22,7 +22,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
@@ -31,7 +31,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
@@ -40,7 +40,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
@@ -49,7 +49,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
@@ -58,7 +58,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
@@ -67,7 +67,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
@@ -76,7 +76,7 @@ vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
@@ -85,7 +85,7 @@ vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
@@ -94,7 +94,7 @@ vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
@@ -103,7 +103,7 @@ vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
@@ -112,7 +112,7 @@ vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
@@ -121,7 +121,7 @@ vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
@@ -130,7 +130,7 @@ vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
@@ -139,7 +139,7 @@ vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
@@ -148,7 +148,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
@@ -157,7 +157,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
@@ -166,7 +166,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
@@ -175,7 +175,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
@@ -184,7 +184,7 @@ vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
@@ -193,7 +193,7 @@ vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
@@ -202,7 +202,7 @@ vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
@@ -211,7 +211,7 @@ vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
@@ -220,7 +220,7 @@ vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
@@ -229,7 +229,7 @@ vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
@@ -238,7 +238,7 @@ vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
@@ -247,7 +247,7 @@ vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
@@ -256,7 +256,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
@@ -265,7 +265,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
@@ -274,7 +274,7 @@ vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
@@ -292,7 +292,7 @@ vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
@@ -301,7 +301,7 @@ vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
@@ -310,7 +310,7 @@ vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
@@ -319,7 +319,7 @@ vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
@@ -328,7 +328,7 @@ vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
@@ -337,7 +337,7 @@ vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
@@ -346,7 +346,7 @@ vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
@@ -355,7 +355,7 @@ vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
@@ -364,7 +364,7 @@ vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
@@ -373,7 +373,7 @@ vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
@@ -382,7 +382,7 @@ vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
@@ -391,7 +391,7 @@ vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
@@ -400,7 +400,7 @@ vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
@@ -409,7 +409,7 @@ vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
@@ -472,7 +472,7 @@ vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
@@ -481,7 +481,7 @@ vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
@@ -490,7 +490,7 @@ vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
@@ -499,7 +499,7 @@ vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
@@ -508,7 +508,7 @@ vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
@@ -517,7 +517,7 @@ vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
@@ -526,7 +526,7 @@ vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
@@ -535,7 +535,7 @@ vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
@@ -580,7 +580,7 @@ vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
@@ -589,7 +589,7 @@ vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
@@ -598,7 +598,7 @@ vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
@@ -607,7 +607,7 @@ vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
@@ -616,7 +616,7 @@ vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
@@ -625,7 +625,7 @@ vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
@@ -634,7 +634,7 @@ vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
@@ -643,7 +643,7 @@ vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
@@ -670,7 +670,7 @@ vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
@@ -679,7 +679,7 @@ vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2(
@@ -688,7 +688,7 @@ vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4(
@@ -697,7 +697,7 @@ vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4(
@@ -706,7 +706,7 @@ vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8(
@@ -715,7 +715,7 @@ vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8(
@@ -724,7 +724,7 @@ vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1(
@@ -733,7 +733,7 @@ vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1(
@@ -742,7 +742,7 @@ vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2(
@@ -751,7 +751,7 @@ vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2(
@@ -760,7 +760,7 @@ vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4(
@@ -769,7 +769,7 @@ vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4(
@@ -778,7 +778,7 @@ vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8(
@@ -787,7 +787,7 @@ vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac(vd, vs1, vs2, vl);
+ return __riscv_vnmsac(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8(
@@ -796,7 +796,7 @@ vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac(vd, rs1, vs2, vl);
+ return __riscv_vnmsac(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m(
@@ -805,7 +805,7 @@ vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m(
@@ -814,7 +814,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m(
@@ -823,7 +823,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m(
@@ -832,7 +832,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m(
@@ -841,7 +841,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m(
@@ -850,7 +850,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m(
@@ -859,7 +859,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m(
@@ -868,7 +868,7 @@ vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m(
@@ -877,7 +877,7 @@ vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m(
@@ -886,7 +886,7 @@ vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m(
@@ -895,7 +895,7 @@ vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m(
@@ -904,7 +904,7 @@ vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m(
@@ -913,7 +913,7 @@ vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m(
@@ -922,7 +922,7 @@ vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m(
@@ -931,7 +931,7 @@ vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m(
@@ -940,7 +940,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m(
@@ -949,7 +949,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m(
@@ -958,7 +958,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m(
@@ -967,7 +967,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m(
@@ -976,7 +976,7 @@ vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m(
@@ -985,7 +985,7 @@ vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m(
@@ -994,7 +994,7 @@ vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m(
@@ -1003,7 +1003,7 @@ vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m(
@@ -1012,7 +1012,7 @@ vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m(
@@ -1021,7 +1021,7 @@ vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m(
@@ -1030,7 +1030,7 @@ vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m(
@@ -1039,7 +1039,7 @@ vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m(
@@ -1048,7 +1048,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m(
@@ -1057,7 +1057,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m(
@@ -1066,7 +1066,7 @@ vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m(
@@ -1075,7 +1075,7 @@ vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m(
@@ -1084,7 +1084,7 @@ vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m(
@@ -1093,7 +1093,7 @@ vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m(
@@ -1102,7 +1102,7 @@ vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m(
@@ -1111,7 +1111,7 @@ vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m(
@@ -1120,7 +1120,7 @@ vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m(
@@ -1129,7 +1129,7 @@ vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m(
@@ -1138,7 +1138,7 @@ vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m(
@@ -1147,7 +1147,7 @@ vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m(
@@ -1156,7 +1156,7 @@ vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m(
@@ -1165,7 +1165,7 @@ vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m(
@@ -1174,7 +1174,7 @@ vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m(
@@ -1183,7 +1183,7 @@ vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m(
@@ -1192,7 +1192,7 @@ vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m(
@@ -1201,7 +1201,7 @@ vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m(
@@ -1210,7 +1210,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m(
@@ -1219,7 +1219,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m(
@@ -1228,7 +1228,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m(
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m(
@@ -1246,7 +1246,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m(
@@ -1255,7 +1255,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m(
@@ -1264,7 +1264,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m(
@@ -1273,7 +1273,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m(
@@ -1282,7 +1282,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m(
@@ -1300,7 +1300,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m(
@@ -1309,7 +1309,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m(
@@ -1318,7 +1318,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m(
@@ -1327,7 +1327,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m(
@@ -1336,7 +1336,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m(
@@ -1345,7 +1345,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m(
@@ -1354,7 +1354,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m(
@@ -1363,7 +1363,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m(
@@ -1372,7 +1372,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m(
@@ -1381,7 +1381,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m(
@@ -1390,7 +1390,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m(
@@ -1399,7 +1399,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m(
@@ -1408,7 +1408,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m(
@@ -1417,7 +1417,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m(
@@ -1426,7 +1426,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m(
@@ -1435,7 +1435,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m(
@@ -1444,7 +1444,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m(
@@ -1453,7 +1453,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m(
@@ -1462,7 +1462,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m(
@@ -1471,7 +1471,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m(
@@ -1480,7 +1480,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m(
@@ -1489,7 +1489,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m(
@@ -1498,7 +1498,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m(
@@ -1525,7 +1525,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m(
@@ -1534,7 +1534,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m(
@@ -1543,7 +1543,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m(
@@ -1552,7 +1552,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m(
@@ -1561,7 +1561,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
@@ -1570,7 +1570,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
@@ -1579,7 +1579,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
@@ -1588,6 +1588,6 @@ vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c
index 917b65b13826..8a607888b995 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8(
@@ -22,7 +22,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4(
@@ -31,7 +31,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4(
@@ -40,7 +40,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2(
@@ -49,7 +49,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2(
@@ -58,7 +58,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
@@ -67,7 +67,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1(
@@ -76,7 +76,7 @@ vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2(
@@ -85,7 +85,7 @@ vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2(
@@ -94,7 +94,7 @@ vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4(
@@ -103,7 +103,7 @@ vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4(
@@ -112,7 +112,7 @@ vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8(
@@ -121,7 +121,7 @@ vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8(
@@ -130,7 +130,7 @@ vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4(
@@ -139,7 +139,7 @@ vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4(
@@ -148,7 +148,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2(
@@ -157,7 +157,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2(
@@ -166,7 +166,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1(
@@ -175,7 +175,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1(
@@ -184,7 +184,7 @@ vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2(
@@ -193,7 +193,7 @@ vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2(
@@ -202,7 +202,7 @@ vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4(
@@ -211,7 +211,7 @@ vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4(
@@ -220,7 +220,7 @@ vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8(
@@ -229,7 +229,7 @@ vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8(
@@ -238,7 +238,7 @@ vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2(
@@ -247,7 +247,7 @@ vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2(
@@ -256,7 +256,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1(
@@ -265,7 +265,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1(
@@ -274,7 +274,7 @@ vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2(
@@ -292,7 +292,7 @@ vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4(
@@ -301,7 +301,7 @@ vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4(
@@ -310,7 +310,7 @@ vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8(
@@ -319,7 +319,7 @@ vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8(
@@ -328,7 +328,7 @@ vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1(
@@ -337,7 +337,7 @@ vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1(
@@ -346,7 +346,7 @@ vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2(
@@ -355,7 +355,7 @@ vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2(
@@ -364,7 +364,7 @@ vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4(
@@ -373,7 +373,7 @@ vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4(
@@ -382,7 +382,7 @@ vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8(
@@ -391,7 +391,7 @@ vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8(
@@ -400,7 +400,7 @@ vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8(
@@ -409,7 +409,7 @@ vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1(
@@ -472,7 +472,7 @@ vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2(
@@ -481,7 +481,7 @@ vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2(
@@ -490,7 +490,7 @@ vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4(
@@ -499,7 +499,7 @@ vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4(
@@ -508,7 +508,7 @@ vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8(
@@ -517,7 +517,7 @@ vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8(
@@ -526,7 +526,7 @@ vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4(
@@ -535,7 +535,7 @@ vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1(
@@ -580,7 +580,7 @@ vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2(
@@ -589,7 +589,7 @@ vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2(
@@ -598,7 +598,7 @@ vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4(
@@ -607,7 +607,7 @@ vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4(
@@ -616,7 +616,7 @@ vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8(
@@ -625,7 +625,7 @@ vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8(
@@ -634,7 +634,7 @@ vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2(
@@ -643,7 +643,7 @@ vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1(
@@ -670,7 +670,7 @@ vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2(
@@ -679,7 +679,7 @@ vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2(
@@ -688,7 +688,7 @@ vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4(
@@ -697,7 +697,7 @@ vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4(
@@ -706,7 +706,7 @@ vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8(
@@ -715,7 +715,7 @@ vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8(
@@ -724,7 +724,7 @@ vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1(
@@ -733,7 +733,7 @@ vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1(
@@ -742,7 +742,7 @@ vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2(
@@ -751,7 +751,7 @@ vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2(
@@ -760,7 +760,7 @@ vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4(
@@ -769,7 +769,7 @@ vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4(
@@ -778,7 +778,7 @@ vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8(
@@ -787,7 +787,7 @@ vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub(vd, vs1, vs2, vl);
+ return __riscv_vnmsub(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8(
@@ -796,7 +796,7 @@ vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub(vd, rs1, vs2, vl);
+ return __riscv_vnmsub(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m(
@@ -805,7 +805,7 @@ vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m(
@@ -814,7 +814,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m(
@@ -823,7 +823,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m(
@@ -832,7 +832,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m(
@@ -841,7 +841,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m(
@@ -850,7 +850,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m(
@@ -859,7 +859,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m(
@@ -868,7 +868,7 @@ vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m(
@@ -877,7 +877,7 @@ vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m(
@@ -886,7 +886,7 @@ vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m(
@@ -895,7 +895,7 @@ vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m(
@@ -904,7 +904,7 @@ vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m(
@@ -913,7 +913,7 @@ vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m(
@@ -922,7 +922,7 @@ vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m(
@@ -931,7 +931,7 @@ vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m(
@@ -940,7 +940,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m(
@@ -949,7 +949,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m(
@@ -958,7 +958,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m(
@@ -967,7 +967,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m(
@@ -976,7 +976,7 @@ vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m(
@@ -985,7 +985,7 @@ vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m(
@@ -994,7 +994,7 @@ vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m(
@@ -1003,7 +1003,7 @@ vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m(
@@ -1012,7 +1012,7 @@ vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m(
@@ -1021,7 +1021,7 @@ vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m(
@@ -1030,7 +1030,7 @@ vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m(
@@ -1039,7 +1039,7 @@ vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m(
@@ -1048,7 +1048,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m(
@@ -1057,7 +1057,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m(
@@ -1066,7 +1066,7 @@ vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m(
@@ -1075,7 +1075,7 @@ vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m(
@@ -1084,7 +1084,7 @@ vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m(
@@ -1093,7 +1093,7 @@ vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m(
@@ -1102,7 +1102,7 @@ vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m(
@@ -1111,7 +1111,7 @@ vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m(
@@ -1120,7 +1120,7 @@ vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m(
@@ -1129,7 +1129,7 @@ vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m(
@@ -1138,7 +1138,7 @@ vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m(
@@ -1147,7 +1147,7 @@ vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m(
@@ -1156,7 +1156,7 @@ vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m(
@@ -1165,7 +1165,7 @@ vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m(
@@ -1174,7 +1174,7 @@ vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m(
@@ -1183,7 +1183,7 @@ vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m(
@@ -1192,7 +1192,7 @@ vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m(
@@ -1201,7 +1201,7 @@ vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m(
@@ -1210,7 +1210,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m(
@@ -1219,7 +1219,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m(
@@ -1228,7 +1228,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m(
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m(
@@ -1246,7 +1246,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m(
@@ -1255,7 +1255,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m(
@@ -1264,7 +1264,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m(
@@ -1273,7 +1273,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m(
@@ -1282,7 +1282,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m(
@@ -1300,7 +1300,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m(
@@ -1309,7 +1309,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m(
@@ -1318,7 +1318,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m(
@@ -1327,7 +1327,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m(
@@ -1336,7 +1336,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m(
@@ -1345,7 +1345,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m(
@@ -1354,7 +1354,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m(
@@ -1363,7 +1363,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m(
@@ -1372,7 +1372,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m(
@@ -1381,7 +1381,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m(
@@ -1390,7 +1390,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m(
@@ -1399,7 +1399,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m(
@@ -1408,7 +1408,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m(
@@ -1417,7 +1417,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m(
@@ -1426,7 +1426,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m(
@@ -1435,7 +1435,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m(
@@ -1444,7 +1444,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m(
@@ -1453,7 +1453,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m(
@@ -1462,7 +1462,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m(
@@ -1471,7 +1471,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m(
@@ -1480,7 +1480,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m(
@@ -1489,7 +1489,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m(
@@ -1498,7 +1498,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m(
@@ -1525,7 +1525,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m(
@@ -1534,7 +1534,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m(
@@ -1543,7 +1543,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m(
@@ -1552,7 +1552,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m(
@@ -1561,7 +1561,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m(
@@ -1570,7 +1570,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m(
@@ -1579,7 +1579,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
@@ -1588,6 +1588,6 @@ vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub(mask, vd, rs1, vs2, vl);
}
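The vnmsub hunks above are purely mechanical: every overloaded call keeps its argument list and only gains the `__riscv_` prefix. As a minimal, self-contained sketch of what a migrated call site looks like (the helper name negated_multiply_sub is illustrative and not part of this patch; the masked i32m1 signature is taken directly from the tests above, and the semantics follow vnmsub.vv: vd[i] = -(vd[i] * vs1[i]) + vs2[i]):

#include <riscv_vector.h>

// Masked overloaded form, mirroring test_vnmsub_vv_i32m1_m above:
// where mask[i] is set, vd[i] = -(vd[i] * vs1[i]) + vs2[i].
vint32m1_t negated_multiply_sub(vbool32_t mask, vint32m1_t vd,
                                vint32m1_t vs1, vint32m1_t vs2,
                                size_t vl) {
  return __riscv_vnmsub(mask, vd, vs1, vs2, vl);  // was: vnmsub(mask, vd, vs1, vs2, vl)
}

Built against a vector-enabled target (e.g. -march=rv64gcv), the spelling of the intrinsic is the only source change the rename requires.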
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnot.c
index ac2e3694d622..c0c2be8e2772 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnot.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8(vint8mf8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4(
@@ -21,7 +21,7 @@ vint8mf8_t test_vnot_v_i8mf8(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4(vint8mf4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2(
@@ -30,7 +30,7 @@ vint8mf4_t test_vnot_v_i8mf4(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2(vint8mf2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m1(
@@ -39,7 +39,7 @@ vint8mf2_t test_vnot_v_i8mf2(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1(vint8m1_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m2(
@@ -48,7 +48,7 @@ vint8m1_t test_vnot_v_i8m1(vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2(vint8m2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m4(
@@ -57,7 +57,7 @@ vint8m2_t test_vnot_v_i8m2(vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4(vint8m4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m8(
@@ -66,7 +66,7 @@ vint8m4_t test_vnot_v_i8m4(vint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnot_v_i8m8(vint8m8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4(
@@ -75,7 +75,7 @@ vint8m8_t test_vnot_v_i8m8(vint8m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnot_v_i16mf4(vint16mf4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2(
@@ -84,7 +84,7 @@ vint16mf4_t test_vnot_v_i16mf4(vint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnot_v_i16mf2(vint16mf2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m1(
@@ -93,7 +93,7 @@ vint16mf2_t test_vnot_v_i16mf2(vint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnot_v_i16m1(vint16m1_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m2(
@@ -102,7 +102,7 @@ vint16m1_t test_vnot_v_i16m1(vint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnot_v_i16m2(vint16m2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m4(
@@ -111,7 +111,7 @@ vint16m2_t test_vnot_v_i16m2(vint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnot_v_i16m4(vint16m4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m8(
@@ -120,7 +120,7 @@ vint16m4_t test_vnot_v_i16m4(vint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnot_v_i16m8(vint16m8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2(
@@ -129,7 +129,7 @@ vint16m8_t test_vnot_v_i16m8(vint16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2(vint32mf2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m1(
@@ -138,7 +138,7 @@ vint32mf2_t test_vnot_v_i32mf2(vint32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnot_v_i32m1(vint32m1_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m2(
@@ -147,7 +147,7 @@ vint32m1_t test_vnot_v_i32m1(vint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnot_v_i32m2(vint32m2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m4(
@@ -156,7 +156,7 @@ vint32m2_t test_vnot_v_i32m2(vint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnot_v_i32m4(vint32m4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m8(
@@ -165,7 +165,7 @@ vint32m4_t test_vnot_v_i32m4(vint32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnot_v_i32m8(vint32m8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m1(
@@ -174,7 +174,7 @@ vint32m8_t test_vnot_v_i32m8(vint32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnot_v_i64m1(vint64m1_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m2(
@@ -183,7 +183,7 @@ vint64m1_t test_vnot_v_i64m1(vint64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnot_v_i64m2(vint64m2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m4(
@@ -192,7 +192,7 @@ vint64m2_t test_vnot_v_i64m2(vint64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnot_v_i64m4(vint64m4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m8(
@@ -201,7 +201,7 @@ vint64m4_t test_vnot_v_i64m4(vint64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnot_v_i64m8(vint64m8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8(
@@ -210,7 +210,7 @@ vint64m8_t test_vnot_v_i64m8(vint64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnot_v_u8mf8(vuint8mf8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4(
@@ -219,7 +219,7 @@ vuint8mf8_t test_vnot_v_u8mf8(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnot_v_u8mf4(vuint8mf4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2(
@@ -228,7 +228,7 @@ vuint8mf4_t test_vnot_v_u8mf4(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnot_v_u8mf2(vuint8mf2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m1(
@@ -237,7 +237,7 @@ vuint8mf2_t test_vnot_v_u8mf2(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnot_v_u8m1(vuint8m1_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m2(
@@ -246,7 +246,7 @@ vuint8m1_t test_vnot_v_u8m1(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnot_v_u8m2(vuint8m2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m4(
@@ -255,7 +255,7 @@ vuint8m2_t test_vnot_v_u8m2(vuint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnot_v_u8m4(vuint8m4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m8(
@@ -264,7 +264,7 @@ vuint8m4_t test_vnot_v_u8m4(vuint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnot_v_u8m8(vuint8m8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4(
@@ -273,7 +273,7 @@ vuint8m8_t test_vnot_v_u8m8(vuint8m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnot_v_u16mf4(vuint16mf4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2(
@@ -282,7 +282,7 @@ vuint16mf4_t test_vnot_v_u16mf4(vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnot_v_u16mf2(vuint16mf2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m1(
@@ -291,7 +291,7 @@ vuint16mf2_t test_vnot_v_u16mf2(vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnot_v_u16m1(vuint16m1_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m2(
@@ -300,7 +300,7 @@ vuint16m1_t test_vnot_v_u16m1(vuint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnot_v_u16m2(vuint16m2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m4(
@@ -309,7 +309,7 @@ vuint16m2_t test_vnot_v_u16m2(vuint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnot_v_u16m4(vuint16m4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m8(
@@ -318,7 +318,7 @@ vuint16m4_t test_vnot_v_u16m4(vuint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnot_v_u16m8(vuint16m8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2(
@@ -327,7 +327,7 @@ vuint16m8_t test_vnot_v_u16m8(vuint16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2(vuint32mf2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m1(
@@ -336,7 +336,7 @@ vuint32mf2_t test_vnot_v_u32mf2(vuint32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnot_v_u32m1(vuint32m1_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m2(
@@ -345,7 +345,7 @@ vuint32m1_t test_vnot_v_u32m1(vuint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnot_v_u32m2(vuint32m2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m4(
@@ -354,7 +354,7 @@ vuint32m2_t test_vnot_v_u32m2(vuint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnot_v_u32m4(vuint32m4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m8(
@@ -363,7 +363,7 @@ vuint32m4_t test_vnot_v_u32m4(vuint32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnot_v_u32m8(vuint32m8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m1(
@@ -372,7 +372,7 @@ vuint32m8_t test_vnot_v_u32m8(vuint32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1(vuint64m1_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m2(
@@ -381,7 +381,7 @@ vuint64m1_t test_vnot_v_u64m1(vuint64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2(vuint64m2_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m4(
@@ -390,7 +390,7 @@ vuint64m2_t test_vnot_v_u64m2(vuint64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4(vuint64m4_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m8(
@@ -399,7 +399,7 @@ vuint64m4_t test_vnot_v_u64m4(vuint64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8(vuint64m8_t op1, size_t vl) {
- return vnot(op1, vl);
+ return __riscv_vnot(op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vnot_v_u64m8(vuint64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2_m(
@@ -426,7 +426,7 @@ vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m1_m(
@@ -435,7 +435,7 @@ vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m2_m(
@@ -444,7 +444,7 @@ vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m4_m(
@@ -453,7 +453,7 @@ vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m8_m(
@@ -462,7 +462,7 @@ vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4_m(
@@ -471,7 +471,7 @@ vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2_m(
@@ -480,7 +480,7 @@ vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m1_m(
@@ -489,7 +489,7 @@ vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m2_m(
@@ -498,7 +498,7 @@ vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m4_m(
@@ -507,7 +507,7 @@ vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m8_m(
@@ -516,7 +516,7 @@ vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_m(
@@ -525,7 +525,7 @@ vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m1_m(
@@ -534,7 +534,7 @@ vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m2_m(
@@ -543,7 +543,7 @@ vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m4_m(
@@ -552,7 +552,7 @@ vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m8_m(
@@ -561,7 +561,7 @@ vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m1_m(
@@ -570,7 +570,7 @@ vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m2_m(
@@ -579,7 +579,7 @@ vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m4_m(
@@ -588,7 +588,7 @@ vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m8_m(
@@ -597,7 +597,7 @@ vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8_m(
@@ -606,7 +606,7 @@ vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4_m(
@@ -615,7 +615,7 @@ vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2_m(
@@ -624,7 +624,7 @@ vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m1_m(
@@ -633,7 +633,7 @@ vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m2_m(
@@ -642,7 +642,7 @@ vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m4_m(
@@ -651,7 +651,7 @@ vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m8_m(
@@ -660,7 +660,7 @@ vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4_m(
@@ -669,7 +669,7 @@ vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2_m(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m1_m(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m2_m(
@@ -696,7 +696,7 @@ vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m4_m(
@@ -705,7 +705,7 @@ vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m8_m(
@@ -714,7 +714,7 @@ vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_m(
@@ -723,7 +723,7 @@ vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m1_m(
@@ -732,7 +732,7 @@ vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m2_m(
@@ -741,7 +741,7 @@ vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m4_m(
@@ -750,7 +750,7 @@ vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m8_m(
@@ -759,7 +759,7 @@ vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m1_m(
@@ -768,7 +768,7 @@ vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m2_m(
@@ -777,7 +777,7 @@ vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m4_m(
@@ -786,7 +786,7 @@ vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t vl) {
- return vnot(mask, op1, vl);
+ return __riscv_vnot(mask, op1, vl);
}
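vnot follows the same pattern: it is the overloaded pseudo-intrinsic for bitwise NOT (lowered as a vxor against all ones), and the rename again leaves the argument list untouched. A small sketch with an illustrative helper name, mirroring the unmasked u8m1 test above:

#include <riscv_vector.h>

// Unmasked overloaded form, mirroring test_vnot_v_u8m1 above:
// flips every bit of the first vl elements of v.
vuint8m1_t invert_bytes(vuint8m1_t v, size_t vl) {
  return __riscv_vnot(v, vl);  // was: vnot(v, vl)
}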
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsra.c
index 4b4de53b5a79..0da16dc04397 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsra.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
@@ -120,7 +120,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
@@ -129,7 +129,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
@@ -138,7 +138,7 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
@@ -147,7 +147,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
@@ -156,7 +156,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
@@ -165,7 +165,7 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
@@ -174,7 +174,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
@@ -183,7 +183,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
@@ -192,7 +192,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
@@ -201,7 +201,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
@@ -210,7 +210,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
@@ -219,7 +219,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
@@ -228,7 +228,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
@@ -237,7 +237,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
@@ -246,7 +246,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
@@ -255,7 +255,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
@@ -264,7 +264,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
@@ -273,7 +273,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
- return vnsra(op1, shift, vl);
+ return __riscv_vnsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m(
@@ -282,7 +282,7 @@ vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m(
@@ -291,7 +291,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m(
@@ -300,7 +300,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m(
@@ -309,7 +309,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m(
@@ -318,7 +318,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m(
@@ -327,7 +327,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m(
@@ -336,7 +336,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m(
@@ -345,7 +345,7 @@ vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m(
@@ -354,7 +354,7 @@ vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m(
@@ -363,7 +363,7 @@ vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m(
@@ -372,7 +372,7 @@ vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m(
@@ -381,7 +381,7 @@ vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m(
@@ -390,7 +390,7 @@ vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m(
@@ -399,7 +399,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m(
@@ -408,7 +408,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m(
@@ -417,7 +417,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m(
@@ -426,7 +426,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m(
@@ -435,7 +435,7 @@ vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m(
@@ -444,7 +444,7 @@ vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m(
@@ -453,7 +453,7 @@ vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shif
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m(
@@ -462,7 +462,7 @@ vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m(
@@ -471,7 +471,7 @@ vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shif
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m(
@@ -480,7 +480,7 @@ vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m(
@@ -489,7 +489,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m(
@@ -498,7 +498,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m(
@@ -507,7 +507,7 @@ vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m(
@@ -516,7 +516,7 @@ vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m(
@@ -525,7 +525,7 @@ vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m(
@@ -534,7 +534,7 @@ vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m(
@@ -543,6 +543,6 @@ vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shif
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return vnsra(mask, op1, shift, vl);
+ return __riscv_vnsra(mask, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsrl.c
index 441cf64de787..b7b7400494a3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsrl.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
@@ -120,7 +120,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
@@ -129,7 +129,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
@@ -138,7 +138,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
@@ -147,7 +147,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
@@ -156,7 +156,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
@@ -165,7 +165,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
@@ -174,7 +174,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
@@ -183,7 +183,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
@@ -192,7 +192,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
@@ -201,7 +201,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
@@ -210,7 +210,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
@@ -219,7 +219,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
@@ -228,7 +228,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
@@ -237,7 +237,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
@@ -246,7 +246,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
@@ -255,7 +255,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
@@ -264,7 +264,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
@@ -273,7 +273,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
- return vnsrl(op1, shift, vl);
+ return __riscv_vnsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m(
@@ -282,7 +282,7 @@ vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m(
@@ -291,7 +291,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m(
@@ -300,7 +300,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m(
@@ -309,7 +309,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m(
@@ -318,7 +318,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m(
@@ -327,7 +327,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m(
@@ -336,7 +336,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m(
@@ -345,7 +345,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m(
@@ -354,7 +354,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m(
@@ -363,7 +363,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m(
@@ -372,7 +372,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m(
@@ -381,7 +381,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m(
@@ -390,7 +390,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m(
@@ -399,7 +399,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m(
@@ -408,7 +408,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m(
@@ -417,7 +417,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m(
@@ -426,7 +426,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m(
@@ -435,7 +435,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m(
@@ -444,7 +444,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m(
@@ -453,7 +453,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m(
@@ -462,7 +462,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m(
@@ -471,7 +471,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m(
@@ -480,7 +480,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m(
@@ -489,7 +489,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m(
@@ -498,7 +498,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m(
@@ -507,7 +507,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m(
@@ -516,7 +516,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m(
@@ -525,7 +525,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m(
@@ -534,7 +534,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m(
@@ -543,6 +543,6 @@ vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnsrl(mask, op1, shift, vl);
+ return __riscv_vnsrl(mask, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vor.c
index 850446ba67e3..4fb54cecf66b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
@@ -408,7 +408,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m1(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m1(
@@ -471,7 +471,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m2(
@@ -480,7 +480,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m2(
@@ -489,7 +489,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m4(
@@ -498,7 +498,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m4(
@@ -507,7 +507,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m8(
@@ -516,7 +516,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m8(
@@ -525,7 +525,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
@@ -534,7 +534,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m1(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m1(
@@ -579,7 +579,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m2(
@@ -588,7 +588,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m2(
@@ -597,7 +597,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m4(
@@ -606,7 +606,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m4(
@@ -615,7 +615,7 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m8(
@@ -624,7 +624,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m8(
@@ -633,7 +633,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
@@ -642,7 +642,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m1(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m1(
@@ -669,7 +669,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m2(
@@ -678,7 +678,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m2(
@@ -687,7 +687,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m4(
@@ -696,7 +696,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m4(
@@ -705,7 +705,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m8(
@@ -714,7 +714,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m8(
@@ -723,7 +723,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m1(
@@ -732,7 +732,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m1(
@@ -741,7 +741,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m2(
@@ -750,7 +750,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m2(
@@ -759,7 +759,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m4(
@@ -768,7 +768,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m4(
@@ -777,7 +777,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m8(
@@ -786,7 +786,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m8(
@@ -795,7 +795,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vor(op1, op2, vl);
+ return __riscv_vor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m(
@@ -804,7 +804,7 @@ vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m(
@@ -813,7 +813,7 @@ vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m(
@@ -822,7 +822,7 @@ vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m(
@@ -831,7 +831,7 @@ vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m(
@@ -840,7 +840,7 @@ vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m(
@@ -849,7 +849,7 @@ vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m1_m(
@@ -858,7 +858,7 @@ vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m1_m(
@@ -867,7 +867,7 @@ vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m2_m(
@@ -876,7 +876,7 @@ vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m2_m(
@@ -885,7 +885,7 @@ vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m4_m(
@@ -894,7 +894,7 @@ vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m4_m(
@@ -903,7 +903,7 @@ vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m8_m(
@@ -912,7 +912,7 @@ vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m8_m(
@@ -921,7 +921,7 @@ vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m(
@@ -930,7 +930,7 @@ vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m(
@@ -939,7 +939,7 @@ vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m(
@@ -948,7 +948,7 @@ vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m(
@@ -957,7 +957,7 @@ vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m1_m(
@@ -966,7 +966,7 @@ vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m1_m(
@@ -975,7 +975,7 @@ vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m2_m(
@@ -984,7 +984,7 @@ vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m2_m(
@@ -993,7 +993,7 @@ vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m4_m(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m4_m(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m8_m(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m8_m(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m1_m(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m1_m(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m2_m(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m2_m(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m4_m(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m4_m(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m8_m(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m8_m(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m1_m(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m1_m(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m2_m(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m2_m(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m4_m(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m4_m(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m8_m(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m8_m(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m1_m(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m1_m(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m2_m(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m2_m(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m4_m(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m4_m(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m8_m(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m8_m(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m1_m(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m1_m(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m2_m(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m2_m(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m4_m(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m4_m(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m8_m(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m8_m(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m1_m(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m1_m(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m2_m(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m2_m(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m4_m(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m4_m(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m8_m(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m8_m(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m1_m(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m1_m(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m2_m(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m2_m(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m4_m(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m4_m(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vor(mask, op1, op2, vl);
+ return __riscv_vor(mask, op1, op2, vl);
}
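
The user-visible effect of the mechanical `-`/`+` flips above is only the spelling of the call site: the overloaded form keeps its argument list and its type-driven resolution, and gains the `__riscv_` prefix. For reference, a minimal sketch (illustrative only, not part of this patch) of the renamed overloads in user code — it assumes a Clang that already carries this patch-set, the standard <riscv_vector.h> header, and an rv64gcv target; the helper name `or_nonzero` is invented for illustration:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Bitwise-OR a[] with b[] into dst[] only where b[] is nonzero: active
// lanes get a|b, inactive lanes of dst are left untouched by the masked
// store. Uses both the unmasked and masked overloads exercised above.
void or_nonzero(uint32_t *dst, const uint32_t *a, const uint32_t *b,
                size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);            // strip length
    vuint32m1_t va = __riscv_vle32_v_u32m1(a + i, vl);  // unit-stride loads
    vuint32m1_t vb = __riscv_vle32_v_u32m1(b + i, vl);
    vbool32_t m = __riscv_vmsne(vb, 0, vl);             // mask: b != 0
    vuint32m1_t vr = __riscv_vor(m, va, vb, vl);        // masked overload, as in the _m tests
    __riscv_vse32(m, dst + i, vr, vl);                  // masked overloaded store
    i += vl;
  }
}
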
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c
index fa5e59a67670..fd031d20ef16 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
@@ -21,7 +21,7 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
@@ -30,7 +30,7 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
@@ -39,7 +39,7 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
@@ -48,7 +48,7 @@ vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1(
@@ -57,7 +57,7 @@ vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1(
@@ -66,7 +66,7 @@ vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1(
@@ -84,7 +84,7 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1(
@@ -93,7 +93,7 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1(
@@ -102,7 +102,7 @@ vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1(
@@ -111,7 +111,7 @@ vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1(
@@ -120,7 +120,7 @@ vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1(
@@ -129,7 +129,7 @@ vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1(
@@ -138,7 +138,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1(
@@ -147,7 +147,7 @@ vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1(
@@ -156,7 +156,7 @@ vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1(
@@ -165,7 +165,7 @@ vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1(
@@ -174,7 +174,7 @@ vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1(
@@ -183,7 +183,7 @@ vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1(
@@ -192,7 +192,7 @@ vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1(
@@ -201,7 +201,7 @@ vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1(
@@ -210,7 +210,7 @@ vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1(
@@ -399,7 +399,7 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand(vector, scalar, vl);
+ return __riscv_vredand(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
@@ -408,7 +408,7 @@ vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
@@ -417,7 +417,7 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
@@ -426,7 +426,7 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m(
@@ -435,7 +435,7 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m(
@@ -444,7 +444,7 @@ vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m(
@@ -453,7 +453,7 @@ vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m(
@@ -462,7 +462,7 @@ vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
@@ -480,7 +480,7 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m(
@@ -489,7 +489,7 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m(
@@ -498,7 +498,7 @@ vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m(
@@ -507,7 +507,7 @@ vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m(
@@ -516,7 +516,7 @@ vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
@@ -525,7 +525,7 @@ vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m(
@@ -534,7 +534,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m(
@@ -543,7 +543,7 @@ vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m(
@@ -552,7 +552,7 @@ vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m(
@@ -561,7 +561,7 @@ vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m(
@@ -570,7 +570,7 @@ vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m(
@@ -579,7 +579,7 @@ vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m(
@@ -588,7 +588,7 @@ vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m(
@@ -597,7 +597,7 @@ vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
@@ -606,7 +606,7 @@ vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
@@ -615,7 +615,7 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
@@ -624,7 +624,7 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m(
@@ -633,7 +633,7 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m(
@@ -642,7 +642,7 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m(
@@ -651,7 +651,7 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m(
@@ -660,7 +660,7 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
@@ -669,7 +669,7 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
@@ -678,7 +678,7 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m(
@@ -687,7 +687,7 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m(
@@ -696,7 +696,7 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m(
@@ -705,7 +705,7 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m(
@@ -714,7 +714,7 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
@@ -723,7 +723,7 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m(
@@ -732,7 +732,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m(
@@ -741,7 +741,7 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m(
@@ -750,7 +750,7 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
@@ -759,7 +759,7 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
@@ -768,7 +768,7 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
@@ -777,7 +777,7 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
@@ -786,7 +786,7 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
@@ -795,6 +795,6 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand(mask, vector, scalar, vl);
+ return __riscv_vredand(mask, vector, scalar, vl);
}
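For context, the unmasked overloaded reduction form renamed throughout this file takes (vector, scalar, vl) and returns an m1-width result. A minimal usage sketch follows — illustrative only, not part of this patch, assuming riscv_vector.h and a V-extension-enabled target:

    #include <riscv_vector.h>

    // Bitwise-AND reduction of the first vl elements of v, seeded with
    // element 0 of init. The overload resolves on the argument types;
    // __riscv_vredand replaces the old unprefixed vredand spelling
    // exercised by the tests above.
    vuint32m1_t and_reduce(vuint32m8_t v, vuint32m1_t init, size_t vl) {
        return __riscv_vredand(v, init, vl);
    }
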
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c
index c99088c62354..7e96d0e679b1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
@@ -21,7 +21,7 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1(
@@ -30,7 +30,7 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1(
@@ -39,7 +39,7 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1(
@@ -48,7 +48,7 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1(
@@ -57,7 +57,7 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1(
@@ -66,7 +66,7 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1(
@@ -84,7 +84,7 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1(
@@ -93,7 +93,7 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1(
@@ -102,7 +102,7 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1(
@@ -111,7 +111,7 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1(
@@ -120,7 +120,7 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1(
@@ -129,7 +129,7 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1(
@@ -138,7 +138,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1(
@@ -147,7 +147,7 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1(
@@ -156,7 +156,7 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1(
@@ -165,7 +165,7 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1(
@@ -174,7 +174,7 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1(
@@ -183,7 +183,7 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1(
@@ -192,7 +192,7 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1(
@@ -201,7 +201,7 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax(vector, scalar, vl);
+ return __riscv_vredmax(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
@@ -210,7 +210,7 @@ vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
@@ -219,7 +219,7 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
@@ -228,7 +228,7 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
@@ -237,7 +237,7 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
@@ -246,7 +246,7 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
@@ -255,7 +255,7 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
@@ -264,7 +264,7 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
@@ -273,7 +273,7 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
@@ -282,7 +282,7 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
@@ -291,7 +291,7 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
@@ -300,7 +300,7 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
@@ -309,7 +309,7 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
@@ -318,7 +318,7 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
@@ -327,7 +327,7 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
@@ -336,7 +336,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
@@ -345,7 +345,7 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
@@ -354,7 +354,7 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
@@ -363,7 +363,7 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
@@ -372,7 +372,7 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
@@ -381,7 +381,7 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
@@ -390,7 +390,7 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
@@ -399,6 +399,6 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax(mask, vector, scalar, vl);
+ return __riscv_vredmax(mask, vector, scalar, vl);
}
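The masked hunks above apply the same prefix to the four-argument overload, where a leading vboolN_t mask selects which elements participate in the reduction. A hedged sketch under the same assumptions as the tests (riscv_vector.h, V-enabled target):

    #include <riscv_vector.h>

    // Max-reduction over only the elements whose mask bit is set.
    // vbool16_t pairs with 32-bit elements at LMUL=2 (SEW/LMUL = 16),
    // matching the test_vredmax_vs_i32m2_i32m1_m case above.
    vint32m1_t masked_max(vbool16_t m, vint32m2_t v, vint32m1_t init,
                          size_t vl) {
        return __riscv_vredmax(m, v, init, vl);
    }
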
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c
index 4ebbb6df858c..5af326f56918 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
@@ -21,7 +21,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
@@ -30,7 +30,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
@@ -39,7 +39,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
@@ -48,7 +48,7 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
@@ -57,7 +57,7 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
@@ -66,7 +66,7 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
@@ -84,7 +84,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scala
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
@@ -93,7 +93,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scala
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
@@ -102,7 +102,7 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
@@ -111,7 +111,7 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
@@ -120,7 +120,7 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
@@ -129,7 +129,7 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
@@ -138,7 +138,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scala
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
@@ -147,7 +147,7 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
@@ -156,7 +156,7 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
@@ -165,7 +165,7 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
@@ -174,7 +174,7 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
@@ -183,7 +183,7 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
@@ -192,7 +192,7 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
@@ -201,7 +201,7 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu(vector, scalar, vl);
+ return __riscv_vredmaxu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
@@ -210,7 +210,7 @@ vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
@@ -399,6 +399,6 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu(mask, vector, scalar, vl);
+ return __riscv_vredmaxu(mask, vector, scalar, vl);
}
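Note that the signed and unsigned maximum reductions keep separate overloaded names (__riscv_vredmax vs. __riscv_vredmaxu), mirroring the vredmax.vs and vredmaxu.vs instruction mnemonics, even though the vint/vuint operand types already differ. An illustrative sketch, assumptions as above:

    #include <riscv_vector.h>

    // Signed and unsigned max-reduction use distinct overloaded names,
    // echoing the underlying vredmax.vs / vredmaxu.vs instructions.
    vint8m1_t smax(vint8m1_t v, vint8m1_t s, size_t vl) {
        return __riscv_vredmax(v, s, vl);
    }
    vuint8m1_t umax(vuint8m1_t v, vuint8m1_t s, size_t vl) {
        return __riscv_vredmaxu(v, s, vl);
    }
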
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c
index 90ec0220816c..f583c4548a7a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1(
@@ -21,7 +21,7 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1(
@@ -30,7 +30,7 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1(
@@ -39,7 +39,7 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1(
@@ -48,7 +48,7 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1(
@@ -57,7 +57,7 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1(
@@ -66,7 +66,7 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1(
@@ -84,7 +84,7 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1(
@@ -93,7 +93,7 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1(
@@ -102,7 +102,7 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
@@ -111,7 +111,7 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
@@ -120,7 +120,7 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
@@ -129,7 +129,7 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
@@ -138,7 +138,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
@@ -147,7 +147,7 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
@@ -156,7 +156,7 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
@@ -165,7 +165,7 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
@@ -174,7 +174,7 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
@@ -183,7 +183,7 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
@@ -192,7 +192,7 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
@@ -201,7 +201,7 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin(vector, scalar, vl);
+ return __riscv_vredmin(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
@@ -210,7 +210,7 @@ vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
@@ -219,7 +219,7 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
@@ -228,7 +228,7 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
@@ -237,7 +237,7 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
@@ -246,7 +246,7 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
@@ -255,7 +255,7 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
@@ -264,7 +264,7 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
@@ -273,7 +273,7 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
@@ -282,7 +282,7 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
@@ -291,7 +291,7 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
@@ -300,7 +300,7 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
@@ -309,7 +309,7 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
@@ -318,7 +318,7 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
@@ -327,7 +327,7 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
@@ -336,7 +336,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
@@ -345,7 +345,7 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
@@ -354,7 +354,7 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
@@ -363,7 +363,7 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
@@ -372,7 +372,7 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
@@ -381,7 +381,7 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
@@ -390,7 +390,7 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
@@ -399,6 +399,6 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin(mask, vector, scalar, vl);
+ return __riscv_vredmin(mask, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c
index cce5ec38d098..814c84901f78 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
@@ -21,7 +21,7 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
@@ -30,7 +30,7 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
@@ -39,7 +39,7 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
@@ -48,7 +48,7 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
@@ -57,7 +57,7 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
@@ -66,7 +66,7 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
@@ -84,7 +84,7 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scala
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
@@ -93,7 +93,7 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scala
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
@@ -102,7 +102,7 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
@@ -111,7 +111,7 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
@@ -120,7 +120,7 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
@@ -129,7 +129,7 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
@@ -138,7 +138,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scala
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
@@ -147,7 +147,7 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
@@ -156,7 +156,7 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
@@ -165,7 +165,7 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
@@ -174,7 +174,7 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
@@ -183,7 +183,7 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
@@ -192,7 +192,7 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
@@ -201,7 +201,7 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu(vector, scalar, vl);
+ return __riscv_vredminu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
@@ -210,7 +210,7 @@ vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
@@ -399,6 +399,6 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu(mask, vector, scalar, vl);
+ return __riscv_vredminu(mask, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c
index 9defa5df526d..3e3b95f4148c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1(
@@ -21,7 +21,7 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1(
@@ -30,7 +30,7 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1(
@@ -39,7 +39,7 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1(
@@ -48,7 +48,7 @@ vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1(
@@ -57,7 +57,7 @@ vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1(
@@ -66,7 +66,7 @@ vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1(
@@ -84,7 +84,7 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1(
@@ -93,7 +93,7 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1(
@@ -102,7 +102,7 @@ vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1(
@@ -111,7 +111,7 @@ vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1(
@@ -120,7 +120,7 @@ vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1(
@@ -129,7 +129,7 @@ vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1(
@@ -138,7 +138,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1(
@@ -147,7 +147,7 @@ vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1(
@@ -156,7 +156,7 @@ vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1(
@@ -165,7 +165,7 @@ vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
@@ -174,7 +174,7 @@ vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1(
@@ -183,7 +183,7 @@ vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1(
@@ -192,7 +192,7 @@ vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1(
@@ -201,7 +201,7 @@ vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1(
@@ -210,7 +210,7 @@ vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1(
@@ -399,7 +399,7 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor(vector, scalar, vl);
+ return __riscv_vredor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
@@ -408,7 +408,7 @@ vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
@@ -417,7 +417,7 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
@@ -426,7 +426,7 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
@@ -435,7 +435,7 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
@@ -444,7 +444,7 @@ vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
@@ -453,7 +453,7 @@ vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
@@ -462,7 +462,7 @@ vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
@@ -480,7 +480,7 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
@@ -489,7 +489,7 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
@@ -498,7 +498,7 @@ vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
@@ -507,7 +507,7 @@ vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
@@ -516,7 +516,7 @@ vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
@@ -525,7 +525,7 @@ vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
@@ -534,7 +534,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
@@ -543,7 +543,7 @@ vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
@@ -552,7 +552,7 @@ vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
@@ -561,7 +561,7 @@ vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
@@ -570,7 +570,7 @@ vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
@@ -579,7 +579,7 @@ vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
@@ -588,7 +588,7 @@ vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
@@ -597,7 +597,7 @@ vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
@@ -606,7 +606,7 @@ vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
@@ -615,7 +615,7 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
@@ -624,7 +624,7 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
@@ -633,7 +633,7 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m(
@@ -642,7 +642,7 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m(
@@ -651,7 +651,7 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m(
@@ -660,7 +660,7 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
@@ -669,7 +669,7 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
@@ -678,7 +678,7 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m(
@@ -687,7 +687,7 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m(
@@ -696,7 +696,7 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m(
@@ -705,7 +705,7 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m(
@@ -714,7 +714,7 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
@@ -723,7 +723,7 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m(
@@ -732,7 +732,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m(
@@ -741,7 +741,7 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m(
@@ -750,7 +750,7 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m(
@@ -759,7 +759,7 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m(
@@ -768,7 +768,7 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m(
@@ -777,7 +777,7 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m(
@@ -786,7 +786,7 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m(
@@ -795,6 +795,6 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor(mask, vector, scalar, vl);
+ return __riscv_vredor(mask, vector, scalar, vl);
}
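Note on the hunks above: the change is purely mechanical. Every overloaded reduction call gains the `__riscv_` prefix while its argument list is untouched, so type-driven overload resolution behaves exactly as before. As a minimal sketch of how user code migrates (illustrative only, not part of the patch; it assumes a toolchain whose <riscv_vector.h> already provides the prefixed spellings of the surrounding vsetvl/vle8/vmv intrinsics as well):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* OR-reduce a byte buffer strip by strip. Only the __riscv_vredor call is
 * covered by this file's hunks; the other prefixed intrinsics are assumed
 * to exist once the full patch-set lands. */
int8_t or_reduce(const int8_t *buf, size_t n) {
  int8_t acc = 0;
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    vint8m1_t v = __riscv_vle8_v_i8m1(buf + i, vl);
    /* Broadcast acc; only element 0 seeds the reduction. */
    vint8m1_t seed = __riscv_vmv_v_x_i8m1(acc, vl);
    vint8m1_t red = __riscv_vredor(v, seed, vl); /* overloaded, non-policy */
    acc = __riscv_vmv_x(red);                    /* overloaded vmv_x_s, assumed spelling */
    i += vl;
  }
  return acc;
}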
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c
index 2a7bd41b21fe..08d800f753a8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
@@ -21,7 +21,7 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
@@ -30,7 +30,7 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
@@ -39,7 +39,7 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
@@ -48,7 +48,7 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
@@ -57,7 +57,7 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
@@ -66,7 +66,7 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
@@ -84,7 +84,7 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
@@ -93,7 +93,7 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
@@ -102,7 +102,7 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
@@ -111,7 +111,7 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
@@ -120,7 +120,7 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
@@ -129,7 +129,7 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
@@ -138,7 +138,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
@@ -147,7 +147,7 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
@@ -156,7 +156,7 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
@@ -165,7 +165,7 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
@@ -174,7 +174,7 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
@@ -183,7 +183,7 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
@@ -192,7 +192,7 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
@@ -201,7 +201,7 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
@@ -210,7 +210,7 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
@@ -399,7 +399,7 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum(vector, scalar, vl);
+ return __riscv_vredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
@@ -408,7 +408,7 @@ vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
@@ -417,7 +417,7 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
@@ -426,7 +426,7 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
@@ -435,7 +435,7 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
@@ -444,7 +444,7 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
@@ -453,7 +453,7 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
@@ -462,7 +462,7 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
@@ -480,7 +480,7 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
@@ -489,7 +489,7 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
@@ -498,7 +498,7 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
@@ -507,7 +507,7 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
@@ -516,7 +516,7 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
@@ -525,7 +525,7 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
@@ -534,7 +534,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
@@ -543,7 +543,7 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
@@ -552,7 +552,7 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
@@ -561,7 +561,7 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
@@ -570,7 +570,7 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
@@ -579,7 +579,7 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
@@ -588,7 +588,7 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
@@ -597,7 +597,7 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
@@ -606,7 +606,7 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
@@ -615,7 +615,7 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
@@ -624,7 +624,7 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
@@ -633,7 +633,7 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
@@ -642,7 +642,7 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
@@ -651,7 +651,7 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
@@ -660,7 +660,7 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
@@ -669,7 +669,7 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
@@ -678,7 +678,7 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
@@ -687,7 +687,7 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
@@ -696,7 +696,7 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
@@ -705,7 +705,7 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
@@ -714,7 +714,7 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
@@ -723,7 +723,7 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
@@ -732,7 +732,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
@@ -741,7 +741,7 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
@@ -750,7 +750,7 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
@@ -759,7 +759,7 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
@@ -768,7 +768,7 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
@@ -777,7 +777,7 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
@@ -786,7 +786,7 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
@@ -795,6 +795,6 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum(mask, vector, scalar, vl);
+ return __riscv_vredsum(mask, vector, scalar, vl);
}
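The masked (_m) tests above keep the same convention: the vbool mask stays the leading argument and only the intrinsic name changes. A hedged sketch of a masked use (illustrative, not taken from the patch; the overloaded vmsgt and vmv_x spellings are assumptions):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Sum only the strictly positive elements of buf. __riscv_vredsum with a
 * leading mask is the masked overload exercised by the tests above;
 * inactive lanes are excluded from the reduction. */
int32_t sum_positive(const int32_t *buf, size_t n) {
  int32_t acc = 0;
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vint32m1_t v = __riscv_vle32_v_i32m1(buf + i, vl);
    vbool32_t keep = __riscv_vmsgt(v, 0, vl);         /* overloaded compare */
    vint32m1_t seed = __riscv_vmv_v_x_i32m1(acc, vl); /* element 0 seeds */
    vint32m1_t red = __riscv_vredsum(keep, v, seed, vl);
    acc = __riscv_vmv_x(red);
    i += vl;
  }
  return acc;
}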
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c
index d269fbb363cd..4e10d0e1924c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
@@ -21,7 +21,7 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
@@ -30,7 +30,7 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
@@ -39,7 +39,7 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
@@ -48,7 +48,7 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
@@ -57,7 +57,7 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
@@ -66,7 +66,7 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
@@ -84,7 +84,7 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
@@ -93,7 +93,7 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
@@ -102,7 +102,7 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
@@ -111,7 +111,7 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
@@ -120,7 +120,7 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
@@ -129,7 +129,7 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
@@ -138,7 +138,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
@@ -147,7 +147,7 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
@@ -156,7 +156,7 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
@@ -165,7 +165,7 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
@@ -174,7 +174,7 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
@@ -183,7 +183,7 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
@@ -192,7 +192,7 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
@@ -201,7 +201,7 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
@@ -210,7 +210,7 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
@@ -399,7 +399,7 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(vector, scalar, vl);
+ return __riscv_vredxor(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
@@ -408,7 +408,7 @@ vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
@@ -417,7 +417,7 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
@@ -426,7 +426,7 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
@@ -435,7 +435,7 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
@@ -444,7 +444,7 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
@@ -453,7 +453,7 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
@@ -462,7 +462,7 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
@@ -480,7 +480,7 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
@@ -489,7 +489,7 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
@@ -498,7 +498,7 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
@@ -507,7 +507,7 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
@@ -516,7 +516,7 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
@@ -525,7 +525,7 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
@@ -534,7 +534,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
@@ -543,7 +543,7 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
@@ -552,7 +552,7 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
@@ -561,7 +561,7 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
@@ -570,7 +570,7 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
@@ -579,7 +579,7 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
@@ -588,7 +588,7 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
@@ -597,7 +597,7 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
@@ -606,7 +606,7 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
@@ -615,7 +615,7 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
@@ -624,7 +624,7 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
@@ -633,7 +633,7 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
@@ -642,7 +642,7 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
@@ -651,7 +651,7 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
@@ -660,7 +660,7 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
@@ -669,7 +669,7 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
@@ -678,7 +678,7 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
@@ -687,7 +687,7 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
@@ -696,7 +696,7 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
@@ -705,7 +705,7 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
@@ -714,7 +714,7 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
@@ -723,7 +723,7 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
@@ -732,7 +732,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
@@ -741,7 +741,7 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
@@ -750,7 +750,7 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
@@ -759,7 +759,7 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
@@ -768,7 +768,7 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
@@ -777,7 +777,7 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
@@ -786,7 +786,7 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
@@ -795,6 +795,6 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(mask, vector, scalar, vl);
+ return __riscv_vredxor(mask, vector, scalar, vl);
}
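For context, a minimal sketch of how the renamed overloaded reduction call reads in user code, assuming a toolchain shipping the updated <riscv_vector.h> headers with RVV enabled (e.g. clang -march=rv64gcv); the helper xor_reduce_i32 is hypothetical and only for illustration, and the vmv_s_x/vmv_x_s signatures follow the non-policy API of the same intrinsics revision:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical helper: XOR-reduce an int32 buffer in vl-sized strips,
// carrying the running result in an m1 scalar accumulator.
static int32_t xor_reduce_i32(const int32_t *buf, size_t n) {
  vint32m1_t acc = __riscv_vmv_s_x_i32m1(0, 1);        // seed accumulator with 0
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m8(n - i);           // strip length for this pass
    vint32m8_t v = __riscv_vle32_v_i32m8(buf + i, vl); // load one strip
    acc = __riscv_vredxor(v, acc, vl);                 // overloaded, __riscv_-prefixed
    i += vl;
  }
  return __riscv_vmv_x_s_i32m1_i32(acc);               // extract element 0
}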
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vreinterpret.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vreinterpret.c
index a3610d08ce6d..0c1e87966f2e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vreinterpret.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vreinterpret.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[SRC:%.*]]
//
vuint8mf8_t test_vreinterpret_v_i8mf8_u8mf8(vint8mf8_t src) {
- return vreinterpret_u8mf8(src);
+ return __riscv_vreinterpret_u8mf8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_u8mf4(
@@ -20,7 +20,7 @@ vuint8mf8_t test_vreinterpret_v_i8mf8_u8mf8(vint8mf8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[SRC:%.*]]
//
vuint8mf4_t test_vreinterpret_v_i8mf4_u8mf4(vint8mf4_t src) {
- return vreinterpret_u8mf4(src);
+ return __riscv_vreinterpret_u8mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_u8mf2(
@@ -28,7 +28,7 @@ vuint8mf4_t test_vreinterpret_v_i8mf4_u8mf4(vint8mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[SRC:%.*]]
//
vuint8mf2_t test_vreinterpret_v_i8mf2_u8mf2(vint8mf2_t src) {
- return vreinterpret_u8mf2(src);
+ return __riscv_vreinterpret_u8mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_u8m1(
@@ -36,7 +36,7 @@ vuint8mf2_t test_vreinterpret_v_i8mf2_u8mf2(vint8mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[SRC:%.*]]
//
vuint8m1_t test_vreinterpret_v_i8m1_u8m1(vint8m1_t src) {
- return vreinterpret_u8m1(src);
+ return __riscv_vreinterpret_u8m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_u8m2(
@@ -44,7 +44,7 @@ vuint8m1_t test_vreinterpret_v_i8m1_u8m1(vint8m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[SRC:%.*]]
//
vuint8m2_t test_vreinterpret_v_i8m2_u8m2(vint8m2_t src) {
- return vreinterpret_u8m2(src);
+ return __riscv_vreinterpret_u8m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_u8m4(
@@ -52,7 +52,7 @@ vuint8m2_t test_vreinterpret_v_i8m2_u8m2(vint8m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[SRC:%.*]]
//
vuint8m4_t test_vreinterpret_v_i8m4_u8m4(vint8m4_t src) {
- return vreinterpret_u8m4(src);
+ return __riscv_vreinterpret_u8m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_u8m8(
@@ -60,7 +60,7 @@ vuint8m4_t test_vreinterpret_v_i8m4_u8m4(vint8m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[SRC:%.*]]
//
vuint8m8_t test_vreinterpret_v_i8m8_u8m8(vint8m8_t src) {
- return vreinterpret_u8m8(src);
+ return __riscv_vreinterpret_u8m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf8_i8mf8(
@@ -68,7 +68,7 @@ vuint8m8_t test_vreinterpret_v_i8m8_u8m8(vint8m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[SRC:%.*]]
//
vint8mf8_t test_vreinterpret_v_u8mf8_i8mf8(vuint8mf8_t src) {
- return vreinterpret_i8mf8(src);
+ return __riscv_vreinterpret_i8mf8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_i8mf4(
@@ -76,7 +76,7 @@ vint8mf8_t test_vreinterpret_v_u8mf8_i8mf8(vuint8mf8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[SRC:%.*]]
//
vint8mf4_t test_vreinterpret_v_u8mf4_i8mf4(vuint8mf4_t src) {
- return vreinterpret_i8mf4(src);
+ return __riscv_vreinterpret_i8mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_i8mf2(
@@ -84,7 +84,7 @@ vint8mf4_t test_vreinterpret_v_u8mf4_i8mf4(vuint8mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[SRC:%.*]]
//
vint8mf2_t test_vreinterpret_v_u8mf2_i8mf2(vuint8mf2_t src) {
- return vreinterpret_i8mf2(src);
+ return __riscv_vreinterpret_i8mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_i8m1(
@@ -92,7 +92,7 @@ vint8mf2_t test_vreinterpret_v_u8mf2_i8mf2(vuint8mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[SRC:%.*]]
//
vint8m1_t test_vreinterpret_v_u8m1_i8m1(vuint8m1_t src) {
- return vreinterpret_i8m1(src);
+ return __riscv_vreinterpret_i8m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_i8m2(
@@ -100,7 +100,7 @@ vint8m1_t test_vreinterpret_v_u8m1_i8m1(vuint8m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[SRC:%.*]]
//
vint8m2_t test_vreinterpret_v_u8m2_i8m2(vuint8m2_t src) {
- return vreinterpret_i8m2(src);
+ return __riscv_vreinterpret_i8m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_i8m4(
@@ -108,7 +108,7 @@ vint8m2_t test_vreinterpret_v_u8m2_i8m2(vuint8m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[SRC:%.*]]
//
vint8m4_t test_vreinterpret_v_u8m4_i8m4(vuint8m4_t src) {
- return vreinterpret_i8m4(src);
+ return __riscv_vreinterpret_i8m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_i8m8(
@@ -116,7 +116,7 @@ vint8m4_t test_vreinterpret_v_u8m4_i8m4(vuint8m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[SRC:%.*]]
//
vint8m8_t test_vreinterpret_v_u8m8_i8m8(vuint8m8_t src) {
- return vreinterpret_i8m8(src);
+ return __riscv_vreinterpret_i8m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_f16mf4(
@@ -125,7 +125,7 @@ vint8m8_t test_vreinterpret_v_u8m8_i8m8(vuint8m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vreinterpret_v_i16mf4_f16mf4(vint16mf4_t src) {
- return vreinterpret_f16mf4(src);
+ return __riscv_vreinterpret_f16mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_f16mf2(
@@ -134,7 +134,7 @@ vfloat16mf4_t test_vreinterpret_v_i16mf4_f16mf4(vint16mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vreinterpret_v_i16mf2_f16mf2(vint16mf2_t src) {
- return vreinterpret_f16mf2(src);
+ return __riscv_vreinterpret_f16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_f16m1(
@@ -143,7 +143,7 @@ vfloat16mf2_t test_vreinterpret_v_i16mf2_f16mf2(vint16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vreinterpret_v_i16m1_f16m1(vint16m1_t src) {
- return vreinterpret_f16m1(src);
+ return __riscv_vreinterpret_f16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_f16m2(
@@ -152,7 +152,7 @@ vfloat16m1_t test_vreinterpret_v_i16m1_f16m1(vint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vreinterpret_v_i16m2_f16m2(vint16m2_t src) {
- return vreinterpret_f16m2(src);
+ return __riscv_vreinterpret_f16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_f16m4(
@@ -161,7 +161,7 @@ vfloat16m2_t test_vreinterpret_v_i16m2_f16m2(vint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vreinterpret_v_i16m4_f16m4(vint16m4_t src) {
- return vreinterpret_f16m4(src);
+ return __riscv_vreinterpret_f16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_f16m8(
@@ -170,7 +170,7 @@ vfloat16m4_t test_vreinterpret_v_i16m4_f16m4(vint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vreinterpret_v_i16m8_f16m8(vint16m8_t src) {
- return vreinterpret_f16m8(src);
+ return __riscv_vreinterpret_f16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_f16mf4(
@@ -179,7 +179,7 @@ vfloat16m8_t test_vreinterpret_v_i16m8_f16m8(vint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vreinterpret_v_u16mf4_f16mf4(vuint16mf4_t src) {
- return vreinterpret_f16mf4(src);
+ return __riscv_vreinterpret_f16mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_f16mf2(
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vreinterpret_v_u16mf4_f16mf4(vuint16mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vreinterpret_v_u16mf2_f16mf2(vuint16mf2_t src) {
- return vreinterpret_f16mf2(src);
+ return __riscv_vreinterpret_f16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_f16m1(
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vreinterpret_v_u16mf2_f16mf2(vuint16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vreinterpret_v_u16m1_f16m1(vuint16m1_t src) {
- return vreinterpret_f16m1(src);
+ return __riscv_vreinterpret_f16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_f16m2(
@@ -206,7 +206,7 @@ vfloat16m1_t test_vreinterpret_v_u16m1_f16m1(vuint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vreinterpret_v_u16m2_f16m2(vuint16m2_t src) {
- return vreinterpret_f16m2(src);
+ return __riscv_vreinterpret_f16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_f16m4(
@@ -215,7 +215,7 @@ vfloat16m2_t test_vreinterpret_v_u16m2_f16m2(vuint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vreinterpret_v_u16m4_f16m4(vuint16m4_t src) {
- return vreinterpret_f16m4(src);
+ return __riscv_vreinterpret_f16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_f16m8(
@@ -224,7 +224,7 @@ vfloat16m4_t test_vreinterpret_v_u16m4_f16m4(vuint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vreinterpret_v_u16m8_f16m8(vuint16m8_t src) {
- return vreinterpret_f16m8(src);
+ return __riscv_vreinterpret_f16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_u16mf4(
@@ -232,7 +232,7 @@ vfloat16m8_t test_vreinterpret_v_u16m8_f16m8(vuint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[SRC:%.*]]
//
vuint16mf4_t test_vreinterpret_v_i16mf4_u16mf4(vint16mf4_t src) {
- return vreinterpret_u16mf4(src);
+ return __riscv_vreinterpret_u16mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_u16mf2(
@@ -240,7 +240,7 @@ vuint16mf4_t test_vreinterpret_v_i16mf4_u16mf4(vint16mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[SRC:%.*]]
//
vuint16mf2_t test_vreinterpret_v_i16mf2_u16mf2(vint16mf2_t src) {
- return vreinterpret_u16mf2(src);
+ return __riscv_vreinterpret_u16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_u16m1(
@@ -248,7 +248,7 @@ vuint16mf2_t test_vreinterpret_v_i16mf2_u16mf2(vint16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[SRC:%.*]]
//
vuint16m1_t test_vreinterpret_v_i16m1_u16m1(vint16m1_t src) {
- return vreinterpret_u16m1(src);
+ return __riscv_vreinterpret_u16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_u16m2(
@@ -256,7 +256,7 @@ vuint16m1_t test_vreinterpret_v_i16m1_u16m1(vint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[SRC:%.*]]
//
vuint16m2_t test_vreinterpret_v_i16m2_u16m2(vint16m2_t src) {
- return vreinterpret_u16m2(src);
+ return __riscv_vreinterpret_u16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_u16m4(
@@ -264,7 +264,7 @@ vuint16m2_t test_vreinterpret_v_i16m2_u16m2(vint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[SRC:%.*]]
//
vuint16m4_t test_vreinterpret_v_i16m4_u16m4(vint16m4_t src) {
- return vreinterpret_u16m4(src);
+ return __riscv_vreinterpret_u16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_u16m8(
@@ -272,7 +272,7 @@ vuint16m4_t test_vreinterpret_v_i16m4_u16m4(vint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[SRC:%.*]]
//
vuint16m8_t test_vreinterpret_v_i16m8_u16m8(vint16m8_t src) {
- return vreinterpret_u16m8(src);
+ return __riscv_vreinterpret_u16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_i16mf4(
@@ -280,7 +280,7 @@ vuint16m8_t test_vreinterpret_v_i16m8_u16m8(vint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[SRC:%.*]]
//
vint16mf4_t test_vreinterpret_v_u16mf4_i16mf4(vuint16mf4_t src) {
- return vreinterpret_i16mf4(src);
+ return __riscv_vreinterpret_i16mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_i16mf2(
@@ -288,7 +288,7 @@ vint16mf4_t test_vreinterpret_v_u16mf4_i16mf4(vuint16mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[SRC:%.*]]
//
vint16mf2_t test_vreinterpret_v_u16mf2_i16mf2(vuint16mf2_t src) {
- return vreinterpret_i16mf2(src);
+ return __riscv_vreinterpret_i16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_i16m1(
@@ -296,7 +296,7 @@ vint16mf2_t test_vreinterpret_v_u16mf2_i16mf2(vuint16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[SRC:%.*]]
//
vint16m1_t test_vreinterpret_v_u16m1_i16m1(vuint16m1_t src) {
- return vreinterpret_i16m1(src);
+ return __riscv_vreinterpret_i16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_i16m2(
@@ -304,7 +304,7 @@ vint16m1_t test_vreinterpret_v_u16m1_i16m1(vuint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[SRC:%.*]]
//
vint16m2_t test_vreinterpret_v_u16m2_i16m2(vuint16m2_t src) {
- return vreinterpret_i16m2(src);
+ return __riscv_vreinterpret_i16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_i16m4(
@@ -312,7 +312,7 @@ vint16m2_t test_vreinterpret_v_u16m2_i16m2(vuint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[SRC:%.*]]
//
vint16m4_t test_vreinterpret_v_u16m4_i16m4(vuint16m4_t src) {
- return vreinterpret_i16m4(src);
+ return __riscv_vreinterpret_i16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_i16m8(
@@ -320,7 +320,7 @@ vint16m4_t test_vreinterpret_v_u16m4_i16m4(vuint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[SRC:%.*]]
//
vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) {
- return vreinterpret_i16m8(src);
+ return __riscv_vreinterpret_i16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_i16mf4(
@@ -329,7 +329,7 @@ vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vreinterpret_v_f16mf4_i16mf4(vfloat16mf4_t src) {
- return vreinterpret_i16mf4(src);
+ return __riscv_vreinterpret_i16mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_i16mf2(
@@ -338,7 +338,7 @@ vint16mf4_t test_vreinterpret_v_f16mf4_i16mf4(vfloat16mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vreinterpret_v_f16mf2_i16mf2(vfloat16mf2_t src) {
- return vreinterpret_i16mf2(src);
+ return __riscv_vreinterpret_i16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_i16m1(
@@ -347,7 +347,7 @@ vint16mf2_t test_vreinterpret_v_f16mf2_i16mf2(vfloat16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vreinterpret_v_f16m1_i16m1(vfloat16m1_t src) {
- return vreinterpret_i16m1(src);
+ return __riscv_vreinterpret_i16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_i16m2(
@@ -356,7 +356,7 @@ vint16m1_t test_vreinterpret_v_f16m1_i16m1(vfloat16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vreinterpret_v_f16m2_i16m2(vfloat16m2_t src) {
- return vreinterpret_i16m2(src);
+ return __riscv_vreinterpret_i16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_i16m4(
@@ -365,7 +365,7 @@ vint16m2_t test_vreinterpret_v_f16m2_i16m2(vfloat16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vreinterpret_v_f16m4_i16m4(vfloat16m4_t src) {
- return vreinterpret_i16m4(src);
+ return __riscv_vreinterpret_i16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_i16m8(
@@ -374,7 +374,7 @@ vint16m4_t test_vreinterpret_v_f16m4_i16m4(vfloat16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vreinterpret_v_f16m8_i16m8(vfloat16m8_t src) {
- return vreinterpret_i16m8(src);
+ return __riscv_vreinterpret_i16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_u16mf4(
@@ -383,7 +383,7 @@ vint16m8_t test_vreinterpret_v_f16m8_i16m8(vfloat16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vreinterpret_v_f16mf4_u16mf4(vfloat16mf4_t src) {
- return vreinterpret_u16mf4(src);
+ return __riscv_vreinterpret_u16mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_u16mf2(
@@ -392,7 +392,7 @@ vuint16mf4_t test_vreinterpret_v_f16mf4_u16mf4(vfloat16mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vreinterpret_v_f16mf2_u16mf2(vfloat16mf2_t src) {
- return vreinterpret_u16mf2(src);
+ return __riscv_vreinterpret_u16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_u16m1(
@@ -401,7 +401,7 @@ vuint16mf2_t test_vreinterpret_v_f16mf2_u16mf2(vfloat16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vreinterpret_v_f16m1_u16m1(vfloat16m1_t src) {
- return vreinterpret_u16m1(src);
+ return __riscv_vreinterpret_u16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_u16m2(
@@ -410,7 +410,7 @@ vuint16m1_t test_vreinterpret_v_f16m1_u16m1(vfloat16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vreinterpret_v_f16m2_u16m2(vfloat16m2_t src) {
- return vreinterpret_u16m2(src);
+ return __riscv_vreinterpret_u16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_u16m4(
@@ -419,7 +419,7 @@ vuint16m2_t test_vreinterpret_v_f16m2_u16m2(vfloat16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vreinterpret_v_f16m4_u16m4(vfloat16m4_t src) {
- return vreinterpret_u16m4(src);
+ return __riscv_vreinterpret_u16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_u16m8(
@@ -428,7 +428,7 @@ vuint16m4_t test_vreinterpret_v_f16m4_u16m4(vfloat16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vreinterpret_v_f16m8_u16m8(vfloat16m8_t src) {
- return vreinterpret_u16m8(src);
+ return __riscv_vreinterpret_u16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_f32mf2(
@@ -437,7 +437,7 @@ vuint16m8_t test_vreinterpret_v_f16m8_u16m8(vfloat16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vreinterpret_v_i32mf2_f32mf2(vint32mf2_t src) {
- return vreinterpret_f32mf2(src);
+ return __riscv_vreinterpret_f32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_f32m1(
@@ -446,7 +446,7 @@ vfloat32mf2_t test_vreinterpret_v_i32mf2_f32mf2(vint32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vreinterpret_v_i32m1_f32m1(vint32m1_t src) {
- return vreinterpret_f32m1(src);
+ return __riscv_vreinterpret_f32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_f32m2(
@@ -455,7 +455,7 @@ vfloat32m1_t test_vreinterpret_v_i32m1_f32m1(vint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vreinterpret_v_i32m2_f32m2(vint32m2_t src) {
- return vreinterpret_f32m2(src);
+ return __riscv_vreinterpret_f32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_f32m4(
@@ -464,7 +464,7 @@ vfloat32m2_t test_vreinterpret_v_i32m2_f32m2(vint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vreinterpret_v_i32m4_f32m4(vint32m4_t src) {
- return vreinterpret_f32m4(src);
+ return __riscv_vreinterpret_f32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_f32m8(
@@ -473,7 +473,7 @@ vfloat32m4_t test_vreinterpret_v_i32m4_f32m4(vint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vreinterpret_v_i32m8_f32m8(vint32m8_t src) {
- return vreinterpret_f32m8(src);
+ return __riscv_vreinterpret_f32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_f32mf2(
@@ -482,7 +482,7 @@ vfloat32m8_t test_vreinterpret_v_i32m8_f32m8(vint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vreinterpret_v_u32mf2_f32mf2(vuint32mf2_t src) {
- return vreinterpret_f32mf2(src);
+ return __riscv_vreinterpret_f32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_f32m1(
@@ -491,7 +491,7 @@ vfloat32mf2_t test_vreinterpret_v_u32mf2_f32mf2(vuint32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vreinterpret_v_u32m1_f32m1(vuint32m1_t src) {
- return vreinterpret_f32m1(src);
+ return __riscv_vreinterpret_f32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_f32m2(
@@ -500,7 +500,7 @@ vfloat32m1_t test_vreinterpret_v_u32m1_f32m1(vuint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vreinterpret_v_u32m2_f32m2(vuint32m2_t src) {
- return vreinterpret_f32m2(src);
+ return __riscv_vreinterpret_f32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_f32m4(
@@ -509,7 +509,7 @@ vfloat32m2_t test_vreinterpret_v_u32m2_f32m2(vuint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vreinterpret_v_u32m4_f32m4(vuint32m4_t src) {
- return vreinterpret_f32m4(src);
+ return __riscv_vreinterpret_f32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_f32m8(
@@ -518,7 +518,7 @@ vfloat32m4_t test_vreinterpret_v_u32m4_f32m4(vuint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vreinterpret_v_u32m8_f32m8(vuint32m8_t src) {
- return vreinterpret_f32m8(src);
+ return __riscv_vreinterpret_f32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_u32mf2(
@@ -526,7 +526,7 @@ vfloat32m8_t test_vreinterpret_v_u32m8_f32m8(vuint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[SRC:%.*]]
//
vuint32mf2_t test_vreinterpret_v_i32mf2_u32mf2(vint32mf2_t src) {
- return vreinterpret_u32mf2(src);
+ return __riscv_vreinterpret_u32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_u32m1(
@@ -534,7 +534,7 @@ vuint32mf2_t test_vreinterpret_v_i32mf2_u32mf2(vint32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[SRC:%.*]]
//
vuint32m1_t test_vreinterpret_v_i32m1_u32m1(vint32m1_t src) {
- return vreinterpret_u32m1(src);
+ return __riscv_vreinterpret_u32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_u32m2(
@@ -542,7 +542,7 @@ vuint32m1_t test_vreinterpret_v_i32m1_u32m1(vint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[SRC:%.*]]
//
vuint32m2_t test_vreinterpret_v_i32m2_u32m2(vint32m2_t src) {
- return vreinterpret_u32m2(src);
+ return __riscv_vreinterpret_u32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_u32m4(
@@ -550,7 +550,7 @@ vuint32m2_t test_vreinterpret_v_i32m2_u32m2(vint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[SRC:%.*]]
//
vuint32m4_t test_vreinterpret_v_i32m4_u32m4(vint32m4_t src) {
- return vreinterpret_u32m4(src);
+ return __riscv_vreinterpret_u32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_u32m8(
@@ -558,7 +558,7 @@ vuint32m4_t test_vreinterpret_v_i32m4_u32m4(vint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[SRC:%.*]]
//
vuint32m8_t test_vreinterpret_v_i32m8_u32m8(vint32m8_t src) {
- return vreinterpret_u32m8(src);
+ return __riscv_vreinterpret_u32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_i32mf2(
@@ -566,7 +566,7 @@ vuint32m8_t test_vreinterpret_v_i32m8_u32m8(vint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[SRC:%.*]]
//
vint32mf2_t test_vreinterpret_v_u32mf2_i32mf2(vuint32mf2_t src) {
- return vreinterpret_i32mf2(src);
+ return __riscv_vreinterpret_i32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_i32m1(
@@ -574,7 +574,7 @@ vint32mf2_t test_vreinterpret_v_u32mf2_i32mf2(vuint32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[SRC:%.*]]
//
vint32m1_t test_vreinterpret_v_u32m1_i32m1(vuint32m1_t src) {
- return vreinterpret_i32m1(src);
+ return __riscv_vreinterpret_i32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_i32m2(
@@ -582,7 +582,7 @@ vint32m1_t test_vreinterpret_v_u32m1_i32m1(vuint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[SRC:%.*]]
//
vint32m2_t test_vreinterpret_v_u32m2_i32m2(vuint32m2_t src) {
- return vreinterpret_i32m2(src);
+ return __riscv_vreinterpret_i32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_i32m4(
@@ -590,7 +590,7 @@ vint32m2_t test_vreinterpret_v_u32m2_i32m2(vuint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[SRC:%.*]]
//
vint32m4_t test_vreinterpret_v_u32m4_i32m4(vuint32m4_t src) {
- return vreinterpret_i32m4(src);
+ return __riscv_vreinterpret_i32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_i32m8(
@@ -598,7 +598,7 @@ vint32m4_t test_vreinterpret_v_u32m4_i32m4(vuint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[SRC:%.*]]
//
vint32m8_t test_vreinterpret_v_u32m8_i32m8(vuint32m8_t src) {
- return vreinterpret_i32m8(src);
+ return __riscv_vreinterpret_i32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_i32mf2(
@@ -607,7 +607,7 @@ vint32m8_t test_vreinterpret_v_u32m8_i32m8(vuint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vreinterpret_v_f32mf2_i32mf2(vfloat32mf2_t src) {
- return vreinterpret_i32mf2(src);
+ return __riscv_vreinterpret_i32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_i32m1(
@@ -616,7 +616,7 @@ vint32mf2_t test_vreinterpret_v_f32mf2_i32mf2(vfloat32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vreinterpret_v_f32m1_i32m1(vfloat32m1_t src) {
- return vreinterpret_i32m1(src);
+ return __riscv_vreinterpret_i32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_i32m2(
@@ -625,7 +625,7 @@ vint32m1_t test_vreinterpret_v_f32m1_i32m1(vfloat32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vreinterpret_v_f32m2_i32m2(vfloat32m2_t src) {
- return vreinterpret_i32m2(src);
+ return __riscv_vreinterpret_i32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_i32m4(
@@ -634,7 +634,7 @@ vint32m2_t test_vreinterpret_v_f32m2_i32m2(vfloat32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vreinterpret_v_f32m4_i32m4(vfloat32m4_t src) {
- return vreinterpret_i32m4(src);
+ return __riscv_vreinterpret_i32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_i32m8(
@@ -643,7 +643,7 @@ vint32m4_t test_vreinterpret_v_f32m4_i32m4(vfloat32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vreinterpret_v_f32m8_i32m8(vfloat32m8_t src) {
- return vreinterpret_i32m8(src);
+ return __riscv_vreinterpret_i32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_u32mf2(
@@ -652,7 +652,7 @@ vint32m8_t test_vreinterpret_v_f32m8_i32m8(vfloat32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vreinterpret_v_f32mf2_u32mf2(vfloat32mf2_t src) {
- return vreinterpret_u32mf2(src);
+ return __riscv_vreinterpret_u32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_u32m1(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vreinterpret_v_f32mf2_u32mf2(vfloat32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vreinterpret_v_f32m1_u32m1(vfloat32m1_t src) {
- return vreinterpret_u32m1(src);
+ return __riscv_vreinterpret_u32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_u32m2(
@@ -670,7 +670,7 @@ vuint32m1_t test_vreinterpret_v_f32m1_u32m1(vfloat32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vreinterpret_v_f32m2_u32m2(vfloat32m2_t src) {
- return vreinterpret_u32m2(src);
+ return __riscv_vreinterpret_u32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_u32m4(
@@ -679,7 +679,7 @@ vuint32m2_t test_vreinterpret_v_f32m2_u32m2(vfloat32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vreinterpret_v_f32m4_u32m4(vfloat32m4_t src) {
- return vreinterpret_u32m4(src);
+ return __riscv_vreinterpret_u32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_u32m8(
@@ -688,7 +688,7 @@ vuint32m4_t test_vreinterpret_v_f32m4_u32m4(vfloat32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vreinterpret_v_f32m8_u32m8(vfloat32m8_t src) {
- return vreinterpret_u32m8(src);
+ return __riscv_vreinterpret_u32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_f64m1(
@@ -697,7 +697,7 @@ vuint32m8_t test_vreinterpret_v_f32m8_u32m8(vfloat32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vreinterpret_v_i64m1_f64m1(vint64m1_t src) {
- return vreinterpret_f64m1(src);
+ return __riscv_vreinterpret_f64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_f64m2(
@@ -706,7 +706,7 @@ vfloat64m1_t test_vreinterpret_v_i64m1_f64m1(vint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vreinterpret_v_i64m2_f64m2(vint64m2_t src) {
- return vreinterpret_f64m2(src);
+ return __riscv_vreinterpret_f64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_f64m4(
@@ -715,7 +715,7 @@ vfloat64m2_t test_vreinterpret_v_i64m2_f64m2(vint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vreinterpret_v_i64m4_f64m4(vint64m4_t src) {
- return vreinterpret_f64m4(src);
+ return __riscv_vreinterpret_f64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_f64m8(
@@ -724,7 +724,7 @@ vfloat64m4_t test_vreinterpret_v_i64m4_f64m4(vint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vreinterpret_v_i64m8_f64m8(vint64m8_t src) {
- return vreinterpret_f64m8(src);
+ return __riscv_vreinterpret_f64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_f64m1(
@@ -733,7 +733,7 @@ vfloat64m8_t test_vreinterpret_v_i64m8_f64m8(vint64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vreinterpret_v_u64m1_f64m1(vuint64m1_t src) {
- return vreinterpret_f64m1(src);
+ return __riscv_vreinterpret_f64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_f64m2(
@@ -742,7 +742,7 @@ vfloat64m1_t test_vreinterpret_v_u64m1_f64m1(vuint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vreinterpret_v_u64m2_f64m2(vuint64m2_t src) {
- return vreinterpret_f64m2(src);
+ return __riscv_vreinterpret_f64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_f64m4(
@@ -751,7 +751,7 @@ vfloat64m2_t test_vreinterpret_v_u64m2_f64m2(vuint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vreinterpret_v_u64m4_f64m4(vuint64m4_t src) {
- return vreinterpret_f64m4(src);
+ return __riscv_vreinterpret_f64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_f64m8(
@@ -760,7 +760,7 @@ vfloat64m4_t test_vreinterpret_v_u64m4_f64m4(vuint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vreinterpret_v_u64m8_f64m8(vuint64m8_t src) {
- return vreinterpret_f64m8(src);
+ return __riscv_vreinterpret_f64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_u64m1(
@@ -768,7 +768,7 @@ vfloat64m8_t test_vreinterpret_v_u64m8_f64m8(vuint64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[SRC:%.*]]
//
vuint64m1_t test_vreinterpret_v_i64m1_u64m1(vint64m1_t src) {
- return vreinterpret_u64m1(src);
+ return __riscv_vreinterpret_u64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_u64m2(
@@ -776,7 +776,7 @@ vuint64m1_t test_vreinterpret_v_i64m1_u64m1(vint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[SRC:%.*]]
//
vuint64m2_t test_vreinterpret_v_i64m2_u64m2(vint64m2_t src) {
- return vreinterpret_u64m2(src);
+ return __riscv_vreinterpret_u64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_u64m4(
@@ -784,7 +784,7 @@ vuint64m2_t test_vreinterpret_v_i64m2_u64m2(vint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[SRC:%.*]]
//
vuint64m4_t test_vreinterpret_v_i64m4_u64m4(vint64m4_t src) {
- return vreinterpret_u64m4(src);
+ return __riscv_vreinterpret_u64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_u64m8(
@@ -792,7 +792,7 @@ vuint64m4_t test_vreinterpret_v_i64m4_u64m4(vint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[SRC:%.*]]
//
vuint64m8_t test_vreinterpret_v_i64m8_u64m8(vint64m8_t src) {
- return vreinterpret_u64m8(src);
+ return __riscv_vreinterpret_u64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_i64m1(
@@ -800,7 +800,7 @@ vuint64m8_t test_vreinterpret_v_i64m8_u64m8(vint64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[SRC:%.*]]
//
vint64m1_t test_vreinterpret_v_u64m1_i64m1(vuint64m1_t src) {
- return vreinterpret_i64m1(src);
+ return __riscv_vreinterpret_i64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_i64m2(
@@ -808,7 +808,7 @@ vint64m1_t test_vreinterpret_v_u64m1_i64m1(vuint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[SRC:%.*]]
//
vint64m2_t test_vreinterpret_v_u64m2_i64m2(vuint64m2_t src) {
- return vreinterpret_i64m2(src);
+ return __riscv_vreinterpret_i64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_i64m4(
@@ -816,7 +816,7 @@ vint64m2_t test_vreinterpret_v_u64m2_i64m2(vuint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[SRC:%.*]]
//
vint64m4_t test_vreinterpret_v_u64m4_i64m4(vuint64m4_t src) {
- return vreinterpret_i64m4(src);
+ return __riscv_vreinterpret_i64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_i64m8(
@@ -824,7 +824,7 @@ vint64m4_t test_vreinterpret_v_u64m4_i64m4(vuint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[SRC:%.*]]
//
vint64m8_t test_vreinterpret_v_u64m8_i64m8(vuint64m8_t src) {
- return vreinterpret_i64m8(src);
+ return __riscv_vreinterpret_i64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_i64m1(
@@ -833,7 +833,7 @@ vint64m8_t test_vreinterpret_v_u64m8_i64m8(vuint64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vreinterpret_v_f64m1_i64m1(vfloat64m1_t src) {
- return vreinterpret_i64m1(src);
+ return __riscv_vreinterpret_i64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_i64m2(
@@ -842,7 +842,7 @@ vint64m1_t test_vreinterpret_v_f64m1_i64m1(vfloat64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vreinterpret_v_f64m2_i64m2(vfloat64m2_t src) {
- return vreinterpret_i64m2(src);
+ return __riscv_vreinterpret_i64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_i64m4(
@@ -851,7 +851,7 @@ vint64m2_t test_vreinterpret_v_f64m2_i64m2(vfloat64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vreinterpret_v_f64m4_i64m4(vfloat64m4_t src) {
- return vreinterpret_i64m4(src);
+ return __riscv_vreinterpret_i64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_i64m8(
@@ -860,7 +860,7 @@ vint64m4_t test_vreinterpret_v_f64m4_i64m4(vfloat64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vreinterpret_v_f64m8_i64m8(vfloat64m8_t src) {
- return vreinterpret_i64m8(src);
+ return __riscv_vreinterpret_i64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_u64m1(
@@ -869,7 +869,7 @@ vint64m8_t test_vreinterpret_v_f64m8_i64m8(vfloat64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vreinterpret_v_f64m1_u64m1(vfloat64m1_t src) {
- return vreinterpret_u64m1(src);
+ return __riscv_vreinterpret_u64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_u64m2(
@@ -878,7 +878,7 @@ vuint64m1_t test_vreinterpret_v_f64m1_u64m1(vfloat64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vreinterpret_v_f64m2_u64m2(vfloat64m2_t src) {
- return vreinterpret_u64m2(src);
+ return __riscv_vreinterpret_u64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_u64m4(
@@ -887,7 +887,7 @@ vuint64m2_t test_vreinterpret_v_f64m2_u64m2(vfloat64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vreinterpret_v_f64m4_u64m4(vfloat64m4_t src) {
- return vreinterpret_u64m4(src);
+ return __riscv_vreinterpret_u64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_u64m8(
@@ -896,7 +896,7 @@ vuint64m4_t test_vreinterpret_v_f64m4_u64m4(vfloat64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vreinterpret_v_f64m8_u64m8(vfloat64m8_t src) {
- return vreinterpret_u64m8(src);
+ return __riscv_vreinterpret_u64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_i16mf4(
@@ -905,7 +905,7 @@ vuint64m8_t test_vreinterpret_v_f64m8_u64m8(vfloat64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vreinterpret_v_i8mf4_i16mf4(vint8mf4_t src) {
- return vreinterpret_i16mf4(src);
+ return __riscv_vreinterpret_i16mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i16mf2(
@@ -914,7 +914,7 @@ vint16mf4_t test_vreinterpret_v_i8mf4_i16mf4(vint8mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vreinterpret_v_i8mf2_i16mf2(vint8mf2_t src) {
- return vreinterpret_i16mf2(src);
+ return __riscv_vreinterpret_i16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i16m1(
@@ -923,7 +923,7 @@ vint16mf2_t test_vreinterpret_v_i8mf2_i16mf2(vint8mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vreinterpret_v_i8m1_i16m1(vint8m1_t src) {
- return vreinterpret_i16m1(src);
+ return __riscv_vreinterpret_i16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i16m2(
@@ -932,7 +932,7 @@ vint16m1_t test_vreinterpret_v_i8m1_i16m1(vint8m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vreinterpret_v_i8m2_i16m2(vint8m2_t src) {
- return vreinterpret_i16m2(src);
+ return __riscv_vreinterpret_i16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i16m4(
@@ -941,7 +941,7 @@ vint16m2_t test_vreinterpret_v_i8m2_i16m2(vint8m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vreinterpret_v_i8m4_i16m4(vint8m4_t src) {
- return vreinterpret_i16m4(src);
+ return __riscv_vreinterpret_i16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i16m8(
@@ -950,7 +950,7 @@ vint16m4_t test_vreinterpret_v_i8m4_i16m4(vint8m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vreinterpret_v_i8m8_i16m8(vint8m8_t src) {
- return vreinterpret_i16m8(src);
+ return __riscv_vreinterpret_i16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_u16mf4(
@@ -959,7 +959,7 @@ vint16m8_t test_vreinterpret_v_i8m8_i16m8(vint8m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vreinterpret_v_u8mf4_u16mf4(vuint8mf4_t src) {
- return vreinterpret_u16mf4(src);
+ return __riscv_vreinterpret_u16mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u16mf2(
@@ -968,7 +968,7 @@ vuint16mf4_t test_vreinterpret_v_u8mf4_u16mf4(vuint8mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vreinterpret_v_u8mf2_u16mf2(vuint8mf2_t src) {
- return vreinterpret_u16mf2(src);
+ return __riscv_vreinterpret_u16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u16m1(
@@ -977,7 +977,7 @@ vuint16mf2_t test_vreinterpret_v_u8mf2_u16mf2(vuint8mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vreinterpret_v_u8m1_u16m1(vuint8m1_t src) {
- return vreinterpret_u16m1(src);
+ return __riscv_vreinterpret_u16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u16m2(
@@ -986,7 +986,7 @@ vuint16m1_t test_vreinterpret_v_u8m1_u16m1(vuint8m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vreinterpret_v_u8m2_u16m2(vuint8m2_t src) {
- return vreinterpret_u16m2(src);
+ return __riscv_vreinterpret_u16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u16m4(
@@ -995,7 +995,7 @@ vuint16m2_t test_vreinterpret_v_u8m2_u16m2(vuint8m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vreinterpret_v_u8m4_u16m4(vuint8m4_t src) {
- return vreinterpret_u16m4(src);
+ return __riscv_vreinterpret_u16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u16m8(
@@ -1004,7 +1004,7 @@ vuint16m4_t test_vreinterpret_v_u8m4_u16m4(vuint8m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vreinterpret_v_u8m8_u16m8(vuint8m8_t src) {
- return vreinterpret_u16m8(src);
+ return __riscv_vreinterpret_u16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i32mf2(
@@ -1013,7 +1013,7 @@ vuint16m8_t test_vreinterpret_v_u8m8_u16m8(vuint8m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vreinterpret_v_i8mf2_i32mf2(vint8mf2_t src) {
- return vreinterpret_i32mf2(src);
+ return __riscv_vreinterpret_i32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i32m1(
@@ -1022,7 +1022,7 @@ vint32mf2_t test_vreinterpret_v_i8mf2_i32mf2(vint8mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vreinterpret_v_i8m1_i32m1(vint8m1_t src) {
- return vreinterpret_i32m1(src);
+ return __riscv_vreinterpret_i32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i32m2(
@@ -1031,7 +1031,7 @@ vint32m1_t test_vreinterpret_v_i8m1_i32m1(vint8m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vreinterpret_v_i8m2_i32m2(vint8m2_t src) {
- return vreinterpret_i32m2(src);
+ return __riscv_vreinterpret_i32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i32m4(
@@ -1040,7 +1040,7 @@ vint32m2_t test_vreinterpret_v_i8m2_i32m2(vint8m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vreinterpret_v_i8m4_i32m4(vint8m4_t src) {
- return vreinterpret_i32m4(src);
+ return __riscv_vreinterpret_i32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i32m8(
@@ -1049,7 +1049,7 @@ vint32m4_t test_vreinterpret_v_i8m4_i32m4(vint8m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vreinterpret_v_i8m8_i32m8(vint8m8_t src) {
- return vreinterpret_i32m8(src);
+ return __riscv_vreinterpret_i32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u32mf2(
@@ -1058,7 +1058,7 @@ vint32m8_t test_vreinterpret_v_i8m8_i32m8(vint8m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vreinterpret_v_u8mf2_u32mf2(vuint8mf2_t src) {
- return vreinterpret_u32mf2(src);
+ return __riscv_vreinterpret_u32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u32m1(
@@ -1067,7 +1067,7 @@ vuint32mf2_t test_vreinterpret_v_u8mf2_u32mf2(vuint8mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vreinterpret_v_u8m1_u32m1(vuint8m1_t src) {
- return vreinterpret_u32m1(src);
+ return __riscv_vreinterpret_u32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u32m2(
@@ -1076,7 +1076,7 @@ vuint32m1_t test_vreinterpret_v_u8m1_u32m1(vuint8m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vreinterpret_v_u8m2_u32m2(vuint8m2_t src) {
- return vreinterpret_u32m2(src);
+ return __riscv_vreinterpret_u32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u32m4(
@@ -1085,7 +1085,7 @@ vuint32m2_t test_vreinterpret_v_u8m2_u32m2(vuint8m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vreinterpret_v_u8m4_u32m4(vuint8m4_t src) {
- return vreinterpret_u32m4(src);
+ return __riscv_vreinterpret_u32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u32m8(
@@ -1094,7 +1094,7 @@ vuint32m4_t test_vreinterpret_v_u8m4_u32m4(vuint8m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vreinterpret_v_u8m8_u32m8(vuint8m8_t src) {
- return vreinterpret_u32m8(src);
+ return __riscv_vreinterpret_u32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i64m1(
@@ -1103,7 +1103,7 @@ vuint32m8_t test_vreinterpret_v_u8m8_u32m8(vuint8m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vreinterpret_v_i8m1_i64m1(vint8m1_t src) {
- return vreinterpret_i64m1(src);
+ return __riscv_vreinterpret_i64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i64m2(
@@ -1112,7 +1112,7 @@ vint64m1_t test_vreinterpret_v_i8m1_i64m1(vint8m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vreinterpret_v_i8m2_i64m2(vint8m2_t src) {
- return vreinterpret_i64m2(src);
+ return __riscv_vreinterpret_i64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i64m4(
@@ -1121,7 +1121,7 @@ vint64m2_t test_vreinterpret_v_i8m2_i64m2(vint8m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vreinterpret_v_i8m4_i64m4(vint8m4_t src) {
- return vreinterpret_i64m4(src);
+ return __riscv_vreinterpret_i64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i64m8(
@@ -1130,7 +1130,7 @@ vint64m4_t test_vreinterpret_v_i8m4_i64m4(vint8m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vreinterpret_v_i8m8_i64m8(vint8m8_t src) {
- return vreinterpret_i64m8(src);
+ return __riscv_vreinterpret_i64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u64m1(
@@ -1139,7 +1139,7 @@ vint64m8_t test_vreinterpret_v_i8m8_i64m8(vint8m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vreinterpret_v_u8m1_u64m1(vuint8m1_t src) {
- return vreinterpret_u64m1(src);
+ return __riscv_vreinterpret_u64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u64m2(
@@ -1148,7 +1148,7 @@ vuint64m1_t test_vreinterpret_v_u8m1_u64m1(vuint8m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vreinterpret_v_u8m2_u64m2(vuint8m2_t src) {
- return vreinterpret_u64m2(src);
+ return __riscv_vreinterpret_u64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u64m4(
@@ -1157,7 +1157,7 @@ vuint64m2_t test_vreinterpret_v_u8m2_u64m2(vuint8m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vreinterpret_v_u8m4_u64m4(vuint8m4_t src) {
- return vreinterpret_u64m4(src);
+ return __riscv_vreinterpret_u64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u64m8(
@@ -1166,7 +1166,7 @@ vuint64m4_t test_vreinterpret_v_u8m4_u64m4(vuint8m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vreinterpret_v_u8m8_u64m8(vuint8m8_t src) {
- return vreinterpret_u64m8(src);
+ return __riscv_vreinterpret_u64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_i8mf4(
@@ -1175,7 +1175,7 @@ vuint64m8_t test_vreinterpret_v_u8m8_u64m8(vuint8m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vreinterpret_v_i16mf4_i8mf4(vint16mf4_t src) {
- return vreinterpret_i8mf4(src);
+ return __riscv_vreinterpret_i8mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i8mf2(
@@ -1184,7 +1184,7 @@ vint8mf4_t test_vreinterpret_v_i16mf4_i8mf4(vint16mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vreinterpret_v_i16mf2_i8mf2(vint16mf2_t src) {
- return vreinterpret_i8mf2(src);
+ return __riscv_vreinterpret_i8mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i8m1(
@@ -1193,7 +1193,7 @@ vint8mf2_t test_vreinterpret_v_i16mf2_i8mf2(vint16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vreinterpret_v_i16m1_i8m1(vint16m1_t src) {
- return vreinterpret_i8m1(src);
+ return __riscv_vreinterpret_i8m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i8m2(
@@ -1202,7 +1202,7 @@ vint8m1_t test_vreinterpret_v_i16m1_i8m1(vint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vreinterpret_v_i16m2_i8m2(vint16m2_t src) {
- return vreinterpret_i8m2(src);
+ return __riscv_vreinterpret_i8m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i8m4(
@@ -1211,7 +1211,7 @@ vint8m2_t test_vreinterpret_v_i16m2_i8m2(vint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vreinterpret_v_i16m4_i8m4(vint16m4_t src) {
- return vreinterpret_i8m4(src);
+ return __riscv_vreinterpret_i8m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i8m8(
@@ -1220,7 +1220,7 @@ vint8m4_t test_vreinterpret_v_i16m4_i8m4(vint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vreinterpret_v_i16m8_i8m8(vint16m8_t src) {
- return vreinterpret_i8m8(src);
+ return __riscv_vreinterpret_i8m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_u8mf4(
@@ -1229,7 +1229,7 @@ vint8m8_t test_vreinterpret_v_i16m8_i8m8(vint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vreinterpret_v_u16mf4_u8mf4(vuint16mf4_t src) {
- return vreinterpret_u8mf4(src);
+ return __riscv_vreinterpret_u8mf4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u8mf2(
@@ -1238,7 +1238,7 @@ vuint8mf4_t test_vreinterpret_v_u16mf4_u8mf4(vuint16mf4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vreinterpret_v_u16mf2_u8mf2(vuint16mf2_t src) {
- return vreinterpret_u8mf2(src);
+ return __riscv_vreinterpret_u8mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u8m1(
@@ -1247,7 +1247,7 @@ vuint8mf2_t test_vreinterpret_v_u16mf2_u8mf2(vuint16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vreinterpret_v_u16m1_u8m1(vuint16m1_t src) {
- return vreinterpret_u8m1(src);
+ return __riscv_vreinterpret_u8m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u8m2(
@@ -1256,7 +1256,7 @@ vuint8m1_t test_vreinterpret_v_u16m1_u8m1(vuint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vreinterpret_v_u16m2_u8m2(vuint16m2_t src) {
- return vreinterpret_u8m2(src);
+ return __riscv_vreinterpret_u8m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u8m4(
@@ -1265,7 +1265,7 @@ vuint8m2_t test_vreinterpret_v_u16m2_u8m2(vuint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vreinterpret_v_u16m4_u8m4(vuint16m4_t src) {
- return vreinterpret_u8m4(src);
+ return __riscv_vreinterpret_u8m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u8m8(
@@ -1274,7 +1274,7 @@ vuint8m4_t test_vreinterpret_v_u16m4_u8m4(vuint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vreinterpret_v_u16m8_u8m8(vuint16m8_t src) {
- return vreinterpret_u8m8(src);
+ return __riscv_vreinterpret_u8m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i32mf2(
@@ -1283,7 +1283,7 @@ vuint8m8_t test_vreinterpret_v_u16m8_u8m8(vuint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vreinterpret_v_i16mf2_i32mf2(vint16mf2_t src) {
- return vreinterpret_i32mf2(src);
+ return __riscv_vreinterpret_i32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i32m1(
@@ -1292,7 +1292,7 @@ vint32mf2_t test_vreinterpret_v_i16mf2_i32mf2(vint16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vreinterpret_v_i16m1_i32m1(vint16m1_t src) {
- return vreinterpret_i32m1(src);
+ return __riscv_vreinterpret_i32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i32m2(
@@ -1301,7 +1301,7 @@ vint32m1_t test_vreinterpret_v_i16m1_i32m1(vint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vreinterpret_v_i16m2_i32m2(vint16m2_t src) {
- return vreinterpret_i32m2(src);
+ return __riscv_vreinterpret_i32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i32m4(
@@ -1310,7 +1310,7 @@ vint32m2_t test_vreinterpret_v_i16m2_i32m2(vint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vreinterpret_v_i16m4_i32m4(vint16m4_t src) {
- return vreinterpret_i32m4(src);
+ return __riscv_vreinterpret_i32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i32m8(
@@ -1319,7 +1319,7 @@ vint32m4_t test_vreinterpret_v_i16m4_i32m4(vint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vreinterpret_v_i16m8_i32m8(vint16m8_t src) {
- return vreinterpret_i32m8(src);
+ return __riscv_vreinterpret_i32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u32mf2(
@@ -1328,7 +1328,7 @@ vint32m8_t test_vreinterpret_v_i16m8_i32m8(vint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vreinterpret_v_u16mf2_u32mf2(vuint16mf2_t src) {
- return vreinterpret_u32mf2(src);
+ return __riscv_vreinterpret_u32mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u32m1(
@@ -1337,7 +1337,7 @@ vuint32mf2_t test_vreinterpret_v_u16mf2_u32mf2(vuint16mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vreinterpret_v_u16m1_u32m1(vuint16m1_t src) {
- return vreinterpret_u32m1(src);
+ return __riscv_vreinterpret_u32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u32m2(
@@ -1346,7 +1346,7 @@ vuint32m1_t test_vreinterpret_v_u16m1_u32m1(vuint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vreinterpret_v_u16m2_u32m2(vuint16m2_t src) {
- return vreinterpret_u32m2(src);
+ return __riscv_vreinterpret_u32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u32m4(
@@ -1355,7 +1355,7 @@ vuint32m2_t test_vreinterpret_v_u16m2_u32m2(vuint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vreinterpret_v_u16m4_u32m4(vuint16m4_t src) {
- return vreinterpret_u32m4(src);
+ return __riscv_vreinterpret_u32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u32m8(
@@ -1364,7 +1364,7 @@ vuint32m4_t test_vreinterpret_v_u16m4_u32m4(vuint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vreinterpret_v_u16m8_u32m8(vuint16m8_t src) {
- return vreinterpret_u32m8(src);
+ return __riscv_vreinterpret_u32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i64m1(
@@ -1373,7 +1373,7 @@ vuint32m8_t test_vreinterpret_v_u16m8_u32m8(vuint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vreinterpret_v_i16m1_i64m1(vint16m1_t src) {
- return vreinterpret_i64m1(src);
+ return __riscv_vreinterpret_i64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i64m2(
@@ -1382,7 +1382,7 @@ vint64m1_t test_vreinterpret_v_i16m1_i64m1(vint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vreinterpret_v_i16m2_i64m2(vint16m2_t src) {
- return vreinterpret_i64m2(src);
+ return __riscv_vreinterpret_i64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i64m4(
@@ -1391,7 +1391,7 @@ vint64m2_t test_vreinterpret_v_i16m2_i64m2(vint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vreinterpret_v_i16m4_i64m4(vint16m4_t src) {
- return vreinterpret_i64m4(src);
+ return __riscv_vreinterpret_i64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i64m8(
@@ -1400,7 +1400,7 @@ vint64m4_t test_vreinterpret_v_i16m4_i64m4(vint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vreinterpret_v_i16m8_i64m8(vint16m8_t src) {
- return vreinterpret_i64m8(src);
+ return __riscv_vreinterpret_i64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u64m1(
@@ -1409,7 +1409,7 @@ vint64m8_t test_vreinterpret_v_i16m8_i64m8(vint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vreinterpret_v_u16m1_u64m1(vuint16m1_t src) {
- return vreinterpret_u64m1(src);
+ return __riscv_vreinterpret_u64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u64m2(
@@ -1418,7 +1418,7 @@ vuint64m1_t test_vreinterpret_v_u16m1_u64m1(vuint16m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vreinterpret_v_u16m2_u64m2(vuint16m2_t src) {
- return vreinterpret_u64m2(src);
+ return __riscv_vreinterpret_u64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u64m4(
@@ -1427,7 +1427,7 @@ vuint64m2_t test_vreinterpret_v_u16m2_u64m2(vuint16m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vreinterpret_v_u16m4_u64m4(vuint16m4_t src) {
- return vreinterpret_u64m4(src);
+ return __riscv_vreinterpret_u64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u64m8(
@@ -1436,7 +1436,7 @@ vuint64m4_t test_vreinterpret_v_u16m4_u64m4(vuint16m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vreinterpret_v_u16m8_u64m8(vuint16m8_t src) {
- return vreinterpret_u64m8(src);
+ return __riscv_vreinterpret_u64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i8mf2(
@@ -1445,7 +1445,7 @@ vuint64m8_t test_vreinterpret_v_u16m8_u64m8(vuint16m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vreinterpret_v_i32mf2_i8mf2(vint32mf2_t src) {
- return vreinterpret_i8mf2(src);
+ return __riscv_vreinterpret_i8mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i8m1(
@@ -1454,7 +1454,7 @@ vint8mf2_t test_vreinterpret_v_i32mf2_i8mf2(vint32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vreinterpret_v_i32m1_i8m1(vint32m1_t src) {
- return vreinterpret_i8m1(src);
+ return __riscv_vreinterpret_i8m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i8m2(
@@ -1463,7 +1463,7 @@ vint8m1_t test_vreinterpret_v_i32m1_i8m1(vint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vreinterpret_v_i32m2_i8m2(vint32m2_t src) {
- return vreinterpret_i8m2(src);
+ return __riscv_vreinterpret_i8m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i8m4(
@@ -1472,7 +1472,7 @@ vint8m2_t test_vreinterpret_v_i32m2_i8m2(vint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vreinterpret_v_i32m4_i8m4(vint32m4_t src) {
- return vreinterpret_i8m4(src);
+ return __riscv_vreinterpret_i8m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i8m8(
@@ -1481,7 +1481,7 @@ vint8m4_t test_vreinterpret_v_i32m4_i8m4(vint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vreinterpret_v_i32m8_i8m8(vint32m8_t src) {
- return vreinterpret_i8m8(src);
+ return __riscv_vreinterpret_i8m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u8mf2(
@@ -1490,7 +1490,7 @@ vint8m8_t test_vreinterpret_v_i32m8_i8m8(vint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vreinterpret_v_u32mf2_u8mf2(vuint32mf2_t src) {
- return vreinterpret_u8mf2(src);
+ return __riscv_vreinterpret_u8mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u8m1(
@@ -1499,7 +1499,7 @@ vuint8mf2_t test_vreinterpret_v_u32mf2_u8mf2(vuint32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vreinterpret_v_u32m1_u8m1(vuint32m1_t src) {
- return vreinterpret_u8m1(src);
+ return __riscv_vreinterpret_u8m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u8m2(
@@ -1508,7 +1508,7 @@ vuint8m1_t test_vreinterpret_v_u32m1_u8m1(vuint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vreinterpret_v_u32m2_u8m2(vuint32m2_t src) {
- return vreinterpret_u8m2(src);
+ return __riscv_vreinterpret_u8m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u8m4(
@@ -1517,7 +1517,7 @@ vuint8m2_t test_vreinterpret_v_u32m2_u8m2(vuint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vreinterpret_v_u32m4_u8m4(vuint32m4_t src) {
- return vreinterpret_u8m4(src);
+ return __riscv_vreinterpret_u8m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u8m8(
@@ -1526,7 +1526,7 @@ vuint8m4_t test_vreinterpret_v_u32m4_u8m4(vuint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vreinterpret_v_u32m8_u8m8(vuint32m8_t src) {
- return vreinterpret_u8m8(src);
+ return __riscv_vreinterpret_u8m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i16mf2(
@@ -1535,7 +1535,7 @@ vuint8m8_t test_vreinterpret_v_u32m8_u8m8(vuint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vreinterpret_v_i32mf2_i16mf2(vint32mf2_t src) {
- return vreinterpret_i16mf2(src);
+ return __riscv_vreinterpret_i16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i16m1(
@@ -1544,7 +1544,7 @@ vint16mf2_t test_vreinterpret_v_i32mf2_i16mf2(vint32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vreinterpret_v_i32m1_i16m1(vint32m1_t src) {
- return vreinterpret_i16m1(src);
+ return __riscv_vreinterpret_i16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i16m2(
@@ -1553,7 +1553,7 @@ vint16m1_t test_vreinterpret_v_i32m1_i16m1(vint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vreinterpret_v_i32m2_i16m2(vint32m2_t src) {
- return vreinterpret_i16m2(src);
+ return __riscv_vreinterpret_i16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i16m4(
@@ -1562,7 +1562,7 @@ vint16m2_t test_vreinterpret_v_i32m2_i16m2(vint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vreinterpret_v_i32m4_i16m4(vint32m4_t src) {
- return vreinterpret_i16m4(src);
+ return __riscv_vreinterpret_i16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i16m8(
@@ -1571,7 +1571,7 @@ vint16m4_t test_vreinterpret_v_i32m4_i16m4(vint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vreinterpret_v_i32m8_i16m8(vint32m8_t src) {
- return vreinterpret_i16m8(src);
+ return __riscv_vreinterpret_i16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u16mf2(
@@ -1580,7 +1580,7 @@ vint16m8_t test_vreinterpret_v_i32m8_i16m8(vint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vreinterpret_v_u32mf2_u16mf2(vuint32mf2_t src) {
- return vreinterpret_u16mf2(src);
+ return __riscv_vreinterpret_u16mf2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u16m1(
@@ -1589,7 +1589,7 @@ vuint16mf2_t test_vreinterpret_v_u32mf2_u16mf2(vuint32mf2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vreinterpret_v_u32m1_u16m1(vuint32m1_t src) {
- return vreinterpret_u16m1(src);
+ return __riscv_vreinterpret_u16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u16m2(
@@ -1598,7 +1598,7 @@ vuint16m1_t test_vreinterpret_v_u32m1_u16m1(vuint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vreinterpret_v_u32m2_u16m2(vuint32m2_t src) {
- return vreinterpret_u16m2(src);
+ return __riscv_vreinterpret_u16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u16m4(
@@ -1607,7 +1607,7 @@ vuint16m2_t test_vreinterpret_v_u32m2_u16m2(vuint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vreinterpret_v_u32m4_u16m4(vuint32m4_t src) {
- return vreinterpret_u16m4(src);
+ return __riscv_vreinterpret_u16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u16m8(
@@ -1616,7 +1616,7 @@ vuint16m4_t test_vreinterpret_v_u32m4_u16m4(vuint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vreinterpret_v_u32m8_u16m8(vuint32m8_t src) {
- return vreinterpret_u16m8(src);
+ return __riscv_vreinterpret_u16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i64m1(
@@ -1625,7 +1625,7 @@ vuint16m8_t test_vreinterpret_v_u32m8_u16m8(vuint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vreinterpret_v_i32m1_i64m1(vint32m1_t src) {
- return vreinterpret_i64m1(src);
+ return __riscv_vreinterpret_i64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i64m2(
@@ -1634,7 +1634,7 @@ vint64m1_t test_vreinterpret_v_i32m1_i64m1(vint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vreinterpret_v_i32m2_i64m2(vint32m2_t src) {
- return vreinterpret_i64m2(src);
+ return __riscv_vreinterpret_i64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i64m4(
@@ -1643,7 +1643,7 @@ vint64m2_t test_vreinterpret_v_i32m2_i64m2(vint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vreinterpret_v_i32m4_i64m4(vint32m4_t src) {
- return vreinterpret_i64m4(src);
+ return __riscv_vreinterpret_i64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i64m8(
@@ -1652,7 +1652,7 @@ vint64m4_t test_vreinterpret_v_i32m4_i64m4(vint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vreinterpret_v_i32m8_i64m8(vint32m8_t src) {
- return vreinterpret_i64m8(src);
+ return __riscv_vreinterpret_i64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u64m1(
@@ -1661,7 +1661,7 @@ vint64m8_t test_vreinterpret_v_i32m8_i64m8(vint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vreinterpret_v_u32m1_u64m1(vuint32m1_t src) {
- return vreinterpret_u64m1(src);
+ return __riscv_vreinterpret_u64m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u64m2(
@@ -1670,7 +1670,7 @@ vuint64m1_t test_vreinterpret_v_u32m1_u64m1(vuint32m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vreinterpret_v_u32m2_u64m2(vuint32m2_t src) {
- return vreinterpret_u64m2(src);
+ return __riscv_vreinterpret_u64m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u64m4(
@@ -1679,7 +1679,7 @@ vuint64m2_t test_vreinterpret_v_u32m2_u64m2(vuint32m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vreinterpret_v_u32m4_u64m4(vuint32m4_t src) {
- return vreinterpret_u64m4(src);
+ return __riscv_vreinterpret_u64m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u64m8(
@@ -1688,7 +1688,7 @@ vuint64m4_t test_vreinterpret_v_u32m4_u64m4(vuint32m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vreinterpret_v_u32m8_u64m8(vuint32m8_t src) {
- return vreinterpret_u64m8(src);
+ return __riscv_vreinterpret_u64m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i8m1(
@@ -1697,7 +1697,7 @@ vuint64m8_t test_vreinterpret_v_u32m8_u64m8(vuint32m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vreinterpret_v_i64m1_i8m1(vint64m1_t src) {
- return vreinterpret_i8m1(src);
+ return __riscv_vreinterpret_i8m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i8m2(
@@ -1706,7 +1706,7 @@ vint8m1_t test_vreinterpret_v_i64m1_i8m1(vint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vreinterpret_v_i64m2_i8m2(vint64m2_t src) {
- return vreinterpret_i8m2(src);
+ return __riscv_vreinterpret_i8m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i8m4(
@@ -1715,7 +1715,7 @@ vint8m2_t test_vreinterpret_v_i64m2_i8m2(vint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vreinterpret_v_i64m4_i8m4(vint64m4_t src) {
- return vreinterpret_i8m4(src);
+ return __riscv_vreinterpret_i8m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i8m8(
@@ -1724,7 +1724,7 @@ vint8m4_t test_vreinterpret_v_i64m4_i8m4(vint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vreinterpret_v_i64m8_i8m8(vint64m8_t src) {
- return vreinterpret_i8m8(src);
+ return __riscv_vreinterpret_i8m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u8m1(
@@ -1733,7 +1733,7 @@ vint8m8_t test_vreinterpret_v_i64m8_i8m8(vint64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vreinterpret_v_u64m1_u8m1(vuint64m1_t src) {
- return vreinterpret_u8m1(src);
+ return __riscv_vreinterpret_u8m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u8m2(
@@ -1742,7 +1742,7 @@ vuint8m1_t test_vreinterpret_v_u64m1_u8m1(vuint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vreinterpret_v_u64m2_u8m2(vuint64m2_t src) {
- return vreinterpret_u8m2(src);
+ return __riscv_vreinterpret_u8m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u8m4(
@@ -1751,7 +1751,7 @@ vuint8m2_t test_vreinterpret_v_u64m2_u8m2(vuint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vreinterpret_v_u64m4_u8m4(vuint64m4_t src) {
- return vreinterpret_u8m4(src);
+ return __riscv_vreinterpret_u8m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u8m8(
@@ -1760,7 +1760,7 @@ vuint8m4_t test_vreinterpret_v_u64m4_u8m4(vuint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vreinterpret_v_u64m8_u8m8(vuint64m8_t src) {
- return vreinterpret_u8m8(src);
+ return __riscv_vreinterpret_u8m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i16m1(
@@ -1769,7 +1769,7 @@ vuint8m8_t test_vreinterpret_v_u64m8_u8m8(vuint64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vreinterpret_v_i64m1_i16m1(vint64m1_t src) {
- return vreinterpret_i16m1(src);
+ return __riscv_vreinterpret_i16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i16m2(
@@ -1778,7 +1778,7 @@ vint16m1_t test_vreinterpret_v_i64m1_i16m1(vint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vreinterpret_v_i64m2_i16m2(vint64m2_t src) {
- return vreinterpret_i16m2(src);
+ return __riscv_vreinterpret_i16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i16m4(
@@ -1787,7 +1787,7 @@ vint16m2_t test_vreinterpret_v_i64m2_i16m2(vint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vreinterpret_v_i64m4_i16m4(vint64m4_t src) {
- return vreinterpret_i16m4(src);
+ return __riscv_vreinterpret_i16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i16m8(
@@ -1796,7 +1796,7 @@ vint16m4_t test_vreinterpret_v_i64m4_i16m4(vint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vreinterpret_v_i64m8_i16m8(vint64m8_t src) {
- return vreinterpret_i16m8(src);
+ return __riscv_vreinterpret_i16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u16m1(
@@ -1805,7 +1805,7 @@ vint16m8_t test_vreinterpret_v_i64m8_i16m8(vint64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vreinterpret_v_u64m1_u16m1(vuint64m1_t src) {
- return vreinterpret_u16m1(src);
+ return __riscv_vreinterpret_u16m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u16m2(
@@ -1814,7 +1814,7 @@ vuint16m1_t test_vreinterpret_v_u64m1_u16m1(vuint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vreinterpret_v_u64m2_u16m2(vuint64m2_t src) {
- return vreinterpret_u16m2(src);
+ return __riscv_vreinterpret_u16m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u16m4(
@@ -1823,7 +1823,7 @@ vuint16m2_t test_vreinterpret_v_u64m2_u16m2(vuint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vreinterpret_v_u64m4_u16m4(vuint64m4_t src) {
- return vreinterpret_u16m4(src);
+ return __riscv_vreinterpret_u16m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u16m8(
@@ -1832,7 +1832,7 @@ vuint16m4_t test_vreinterpret_v_u64m4_u16m4(vuint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vreinterpret_v_u64m8_u16m8(vuint64m8_t src) {
- return vreinterpret_u16m8(src);
+ return __riscv_vreinterpret_u16m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i32m1(
@@ -1841,7 +1841,7 @@ vuint16m8_t test_vreinterpret_v_u64m8_u16m8(vuint64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vreinterpret_v_i64m1_i32m1(vint64m1_t src) {
- return vreinterpret_i32m1(src);
+ return __riscv_vreinterpret_i32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i32m2(
@@ -1850,7 +1850,7 @@ vint32m1_t test_vreinterpret_v_i64m1_i32m1(vint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vreinterpret_v_i64m2_i32m2(vint64m2_t src) {
- return vreinterpret_i32m2(src);
+ return __riscv_vreinterpret_i32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i32m4(
@@ -1859,7 +1859,7 @@ vint32m2_t test_vreinterpret_v_i64m2_i32m2(vint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vreinterpret_v_i64m4_i32m4(vint64m4_t src) {
- return vreinterpret_i32m4(src);
+ return __riscv_vreinterpret_i32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i32m8(
@@ -1868,7 +1868,7 @@ vint32m4_t test_vreinterpret_v_i64m4_i32m4(vint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vreinterpret_v_i64m8_i32m8(vint64m8_t src) {
- return vreinterpret_i32m8(src);
+ return __riscv_vreinterpret_i32m8(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u32m1(
@@ -1877,7 +1877,7 @@ vint32m8_t test_vreinterpret_v_i64m8_i32m8(vint64m8_t src) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vreinterpret_v_u64m1_u32m1(vuint64m1_t src) {
- return vreinterpret_u32m1(src);
+ return __riscv_vreinterpret_u32m1(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u32m2(
@@ -1886,7 +1886,7 @@ vuint32m1_t test_vreinterpret_v_u64m1_u32m1(vuint64m1_t src) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vreinterpret_v_u64m2_u32m2(vuint64m2_t src) {
- return vreinterpret_u32m2(src);
+ return __riscv_vreinterpret_u32m2(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u32m4(
@@ -1895,7 +1895,7 @@ vuint32m2_t test_vreinterpret_v_u64m2_u32m2(vuint64m2_t src) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) {
- return vreinterpret_u32m4(src);
+ return __riscv_vreinterpret_u32m4(src);
}
// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u32m8(
@@ -1904,6 +1904,6 @@ vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vreinterpret_v_u64m8_u32m8(vuint64m8_t src) {
- return vreinterpret_u32m8(src);
+ return __riscv_vreinterpret_u32m8(src);
}
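For reference, a minimal caller-side sketch of the renamed overloaded vreinterpret (not part of this patch; it assumes <riscv_vector.h> and a toolchain implementing the __riscv_ prefix, and the wrapper name as_unsigned is hypothetical):

    #include <riscv_vector.h>

    // vreinterpret is a bit-pattern cast at the same SEW/LMUL, so it takes
    // no vl operand; the overload resolves the source type from its argument,
    // matching test_vreinterpret_v_i64m8_u64m8 above.
    vuint64m8_t as_unsigned(vint64m8_t v) {
      return __riscv_vreinterpret_u64m8(v);
    }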
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrem.c
index 8ea31b2254c4..96d549da2e8d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrem.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vrem(op1, op2, vl);
+ return __riscv_vrem(op1, op2, vl);
}
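The remaining vrem hunks cover the masked variants, where the same overloaded name takes a vbool mask as its leading operand. A minimal caller-side sketch (not part of this patch; it assumes <riscv_vector.h> and __riscv_vmsne as the prefixed overloaded compare, with inactive lanes left to the intrinsic's default mask policy):

    #include <riscv_vector.h>

    // Hypothetical helper: compute the remainder only where the divisor
    // is nonzero. For vint32m1_t the matching mask type is vbool32_t.
    vint32m1_t rem_where_nonzero(vint32m1_t a, vint32m1_t b, size_t vl) {
      vbool32_t m = __riscv_vmsne(b, 0, vl);  // mask: lanes with b != 0
      return __riscv_vrem(m, a, b, vl);       // masked overload, mask first
    }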
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrem(mask, op1, op2, vl);
+ return __riscv_vrem(mask, op1, op2, vl);
}
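
A minimal usage sketch of the prefixed overloaded form exercised above. This is a hedged illustration, assuming <riscv_vector.h> and the prefixed non-overloaded names __riscv_vsetvl_e32m1, __riscv_vle32_v_i32m1 and __riscv_vse32_v_i32m1 for the loop scaffolding; the type-suffixed loads pick the register group, and the overloaded __riscv_vrem is then resolved from its vint32m1_t operands:

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

/* Element-wise signed remainder dst[i] = a[i] % b[i], processed in
   vl-sized strips chosen by vsetvl. */
void rem_i32(int32_t *dst, const int32_t *a, const int32_t *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vint32m1_t va = __riscv_vle32_v_i32m1(a + i, vl);
    vint32m1_t vb = __riscv_vle32_v_i32m1(b + i, vl);
    vint32m1_t vr = __riscv_vrem(va, vb, vl); /* overloaded, __riscv_ prefixed */
    __riscv_vse32_v_i32m1(dst + i, vr, vl);
    i += vl;
  }
}
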
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vremu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vremu.c
index 5de97a5f373d..b93a62ace2dd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vremu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vremu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vremu(op1, op2, vl);
+ return __riscv_vremu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vremu(mask, op1, op2, vl);
+ return __riscv_vremu(mask, op1, op2, vl);
}
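
The masked overloads resolve the same way, with the vbool mask as the leading operand, as in the _m tests above. A hedged sketch that guards vremu against zero divisors, assuming the overloaded __riscv_vmsne comparison and the same prefixed load/store/vsetvl names as before:

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

/* dst[i] = a[i] % b[i] for lanes where b[i] != 0. In the non-policy
   masked form shown in these tests the masked-off lanes are agnostic,
   so a real kernel would use a policy variant (e.g. an _mu overload)
   to preserve them. */
void remu_nonzero(uint32_t *dst, const uint32_t *a, const uint32_t *b,
                  size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vuint32m1_t va = __riscv_vle32_v_u32m1(a + i, vl);
    vuint32m1_t vb = __riscv_vle32_v_u32m1(b + i, vl);
    vbool32_t nz = __riscv_vmsne(vb, (uint32_t)0, vl); /* nonzero divisors */
    vuint32m1_t vr = __riscv_vremu(nz, va, vb, vl);    /* masked overload */
    __riscv_vse32_v_u32m1(dst + i, vr, vl);
    i += vl;
  }
}
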
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
index 5827b986add3..e8adf7e57f2c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t index, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t op1, size_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t index, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t op1, size_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t op1, vuint16m1_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t op1, vuint16m2_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t op1, vuint16m4_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t op1, vuint16m8_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
@@ -292,7 +292,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
@@ -301,7 +301,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
@@ -310,7 +310,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
@@ -319,7 +319,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
@@ -328,7 +328,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
@@ -337,7 +337,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
@@ -346,7 +346,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
@@ -355,7 +355,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
@@ -364,7 +364,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
@@ -373,7 +373,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
@@ -382,7 +382,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
@@ -391,7 +391,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
@@ -400,7 +400,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
@@ -409,7 +409,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
@@ -418,7 +418,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
@@ -427,7 +427,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
@@ -436,7 +436,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
@@ -445,7 +445,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
@@ -454,7 +454,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
@@ -463,7 +463,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
@@ -472,7 +472,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
@@ -481,7 +481,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
@@ -490,7 +490,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
@@ -499,7 +499,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
@@ -508,7 +508,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
@@ -517,7 +517,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
@@ -526,7 +526,7 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
@@ -535,7 +535,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
@@ -544,7 +544,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
@@ -553,7 +553,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
@@ -562,7 +562,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
@@ -571,7 +571,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
@@ -580,7 +580,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
@@ -589,7 +589,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
@@ -598,7 +598,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
@@ -607,7 +607,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
@@ -616,7 +616,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
@@ -625,7 +625,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
@@ -634,7 +634,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
@@ -643,7 +643,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
@@ -652,7 +652,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
@@ -661,7 +661,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
@@ -670,7 +670,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
@@ -679,7 +679,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
@@ -688,7 +688,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
@@ -697,7 +697,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
@@ -706,7 +706,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
@@ -715,7 +715,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
@@ -724,7 +724,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
@@ -733,7 +733,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
@@ -742,7 +742,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
@@ -751,7 +751,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
@@ -760,7 +760,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
@@ -769,7 +769,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
@@ -778,7 +778,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
@@ -787,7 +787,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
@@ -796,7 +796,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
@@ -805,7 +805,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
@@ -814,7 +814,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
@@ -823,7 +823,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
@@ -832,7 +832,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
@@ -841,7 +841,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
@@ -850,7 +850,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
@@ -859,7 +859,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
@@ -868,7 +868,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
@@ -877,7 +877,7 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
@@ -886,7 +886,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
@@ -895,7 +895,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
@@ -904,7 +904,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
@@ -913,7 +913,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
@@ -922,7 +922,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
@@ -931,7 +931,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
@@ -940,7 +940,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
@@ -949,7 +949,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
@@ -958,7 +958,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
@@ -967,7 +967,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
@@ -976,7 +976,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
@@ -985,7 +985,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
@@ -994,7 +994,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
@@ -1003,7 +1003,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
@@ -1012,7 +1012,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
@@ -1021,7 +1021,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
@@ -1030,7 +1030,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
@@ -1039,7 +1039,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
@@ -1048,7 +1048,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
@@ -1057,7 +1057,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
@@ -1066,7 +1066,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
- return vrgather(op1, index, vl);
+ return __riscv_vrgather(op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_m(
@@ -1075,7 +1075,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_m(
@@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_m(
@@ -1093,7 +1093,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_m(
@@ -1102,7 +1102,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_m(
@@ -1111,7 +1111,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_m(
@@ -1120,7 +1120,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_m(
@@ -1129,7 +1129,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t i
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_m(
@@ -1138,7 +1138,7 @@ vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_m(
@@ -1147,7 +1147,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_m(
@@ -1156,7 +1156,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_m(
@@ -1165,7 +1165,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_m(
@@ -1174,7 +1174,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m(
@@ -1183,7 +1183,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m(
@@ -1192,7 +1192,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m(
@@ -1201,7 +1201,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m(
@@ -1210,7 +1210,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m(
@@ -1219,7 +1219,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t i
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m(
@@ -1228,7 +1228,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m(
@@ -1237,7 +1237,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t i
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m(
@@ -1246,7 +1246,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m(
@@ -1255,7 +1255,7 @@ vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m(
@@ -1264,7 +1264,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m(
@@ -1273,7 +1273,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m(
@@ -1282,7 +1282,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m(
@@ -1291,7 +1291,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t i
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m(
@@ -1300,7 +1300,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m(
@@ -1309,7 +1309,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t i
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m(
@@ -1318,7 +1318,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m(
@@ -1327,7 +1327,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t i
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m(
@@ -1336,7 +1336,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m(
@@ -1345,7 +1345,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m(
@@ -1354,7 +1354,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m(
@@ -1363,7 +1363,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t index
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m(
@@ -1372,7 +1372,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m(
@@ -1381,7 +1381,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t index
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m(
@@ -1390,7 +1390,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m(
@@ -1399,7 +1399,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t index
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m(
@@ -1408,7 +1408,7 @@ vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t index
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m(
@@ -1417,7 +1417,7 @@ vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t index, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m(
@@ -1426,7 +1426,7 @@ vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t index
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m(
@@ -1435,7 +1435,7 @@ vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t index, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m(
@@ -1444,7 +1444,7 @@ vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t index
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m(
@@ -1453,7 +1453,7 @@ vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t index, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m(
@@ -1462,7 +1462,7 @@ vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t index
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m(
@@ -1471,7 +1471,7 @@ vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t index, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m(
@@ -1480,7 +1480,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m(
@@ -1489,7 +1489,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m(
@@ -1498,7 +1498,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m(
@@ -1507,7 +1507,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m(
@@ -1516,7 +1516,7 @@ vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m(
@@ -1525,7 +1525,7 @@ vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t index
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m(
@@ -1534,7 +1534,7 @@ vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t i
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m(
@@ -1543,7 +1543,7 @@ vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m(
@@ -1552,7 +1552,7 @@ vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t i
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m(
@@ -1561,7 +1561,7 @@ vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m(
@@ -1570,7 +1570,7 @@ vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t i
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m(
@@ -1579,7 +1579,7 @@ vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m(
@@ -1588,7 +1588,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m(
@@ -1597,7 +1597,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m(
@@ -1606,7 +1606,7 @@ vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m(
@@ -1615,7 +1615,7 @@ vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t index
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m(
@@ -1624,7 +1624,7 @@ vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m(
@@ -1633,7 +1633,7 @@ vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t index
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m(
@@ -1642,7 +1642,7 @@ vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t i
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m(
@@ -1651,7 +1651,7 @@ vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m(
@@ -1660,7 +1660,7 @@ vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t i
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m(
@@ -1669,7 +1669,7 @@ vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m(
@@ -1678,7 +1678,7 @@ vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m(
@@ -1687,7 +1687,7 @@ vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t index
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m(
@@ -1696,7 +1696,7 @@ vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m(
@@ -1705,7 +1705,7 @@ vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t index
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m(
@@ -1714,7 +1714,7 @@ vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m(
@@ -1723,7 +1723,7 @@ vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t index
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m(
@@ -1732,7 +1732,7 @@ vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t i
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m(
@@ -1741,7 +1741,7 @@ vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m(
@@ -1750,7 +1750,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m(
@@ -1759,7 +1759,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t ind
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m(
@@ -1768,7 +1768,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m(
@@ -1777,7 +1777,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t ind
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m(
@@ -1786,7 +1786,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m(
@@ -1795,7 +1795,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t ind
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m(
@@ -1804,7 +1804,7 @@ vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t ind
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m(
@@ -1813,7 +1813,7 @@ vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m(
@@ -1822,7 +1822,7 @@ vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t ind
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m(
@@ -1831,7 +1831,7 @@ vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m(
@@ -1840,7 +1840,7 @@ vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t ind
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m(
@@ -1849,7 +1849,7 @@ vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m(
@@ -1858,7 +1858,7 @@ vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t ind
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m(
@@ -1867,7 +1867,7 @@ vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t index,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m(
@@ -1876,7 +1876,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m(
@@ -1885,7 +1885,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m(
@@ -1894,7 +1894,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m(
@@ -1903,7 +1903,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m(
@@ -1912,7 +1912,7 @@ vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m(
@@ -1921,7 +1921,7 @@ vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t ind
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m(
@@ -1930,7 +1930,7 @@ vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m(
@@ -1939,7 +1939,7 @@ vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t inde
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m(
@@ -1948,7 +1948,7 @@ vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m(
@@ -1957,7 +1957,7 @@ vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t inde
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m(
@@ -1966,7 +1966,7 @@ vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m(
@@ -1975,7 +1975,7 @@ vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t inde
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m(
@@ -1984,7 +1984,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m(
@@ -1993,7 +1993,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m(
@@ -2002,7 +2002,7 @@ vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m(
@@ -2011,7 +2011,7 @@ vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t ind
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m(
@@ -2020,7 +2020,7 @@ vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m(
@@ -2029,7 +2029,7 @@ vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t ind
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m(
@@ -2038,7 +2038,7 @@ vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m(
@@ -2047,7 +2047,7 @@ vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t inde
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m(
@@ -2056,7 +2056,7 @@ vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m(
@@ -2065,7 +2065,7 @@ vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t inde
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m(
@@ -2074,7 +2074,7 @@ vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m(
@@ -2083,7 +2083,7 @@ vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t ind
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m(
@@ -2092,7 +2092,7 @@ vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m(
@@ -2101,7 +2101,7 @@ vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t ind
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m(
@@ -2110,7 +2110,7 @@ vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m(
@@ -2119,7 +2119,7 @@ vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t ind
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m(
@@ -2128,6 +2128,6 @@ vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t index, size_t vl) {
- return vrgather(mask, op1, index, vl);
+ return __riscv_vrgather(mask, op1, index, vl);
}
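For reference, after this patch the overloaded form of the intrinsic is spelled with the `__riscv_` prefix, exactly as the updated tests above exercise. A minimal caller sketch follows (an illustration, not part of the autogenerated tests; it assumes <riscv_vector.h> and a V-enabled target such as -march=rv64gcv, and the function name gather_i32m1 is hypothetical):

#include <riscv_vector.h>

// Gather elements of `src` according to `idx`. The overloaded intrinsic
// resolves on its operand types, so no explicit type suffix is needed;
// only the `__riscv_` prefix changes relative to the pre-patch spelling
// `vrgather(src, idx, vl)`.
vint32m1_t gather_i32m1(vint32m1_t src, vuint32m1_t idx, size_t vl) {
  return __riscv_vrgather(src, idx, vl);
}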
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgatherei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgatherei16.c
index 5cc9cdc459af..d32b54fd33b4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgatherei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgatherei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgatherei16_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgatherei16_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgatherei16_vv_f16m1(vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1(vfloat16m1_t op1, vuint16m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgatherei16_vv_f16m2(vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2(vfloat16m2_t op1, vuint16m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgatherei16_vv_f16m4(vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4(vfloat16m4_t op1, vuint16m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgatherei16_vv_f16m8(vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8(vfloat16m8_t op1, vuint16m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4(
@@ -157,7 +157,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2(
@@ -166,7 +166,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1(
@@ -175,7 +175,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2(
@@ -184,7 +184,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4(
@@ -193,7 +193,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4(
@@ -202,7 +202,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2(
@@ -211,7 +211,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1(
@@ -220,7 +220,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2(
@@ -229,7 +229,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4(
@@ -238,7 +238,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8(
@@ -247,7 +247,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2(
@@ -256,7 +256,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1(
@@ -265,7 +265,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2(
@@ -274,7 +274,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4(
@@ -283,7 +283,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8(
@@ -292,7 +292,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1(
@@ -301,7 +301,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2(
@@ -310,7 +310,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4(
@@ -319,7 +319,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8(
@@ -328,7 +328,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8(
@@ -337,7 +337,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4(
@@ -346,7 +346,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2(
@@ -355,7 +355,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1(
@@ -364,7 +364,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2(
@@ -373,7 +373,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4(
@@ -382,7 +382,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4(
@@ -391,7 +391,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2(
@@ -400,7 +400,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1(
@@ -409,7 +409,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2(
@@ -418,7 +418,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4(
@@ -427,7 +427,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8(
@@ -436,7 +436,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2(
@@ -445,7 +445,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1(
@@ -454,7 +454,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2(
@@ -463,7 +463,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4(
@@ -472,7 +472,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8(
@@ -481,7 +481,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1(
@@ -490,7 +490,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2(
@@ -499,7 +499,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4(
@@ -508,7 +508,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8(
@@ -517,7 +517,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(op1, op2, vl);
+ return __riscv_vrgatherei16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_m(
@@ -526,7 +526,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_m(
@@ -535,7 +535,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_m(
@@ -544,7 +544,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_m(
@@ -553,7 +553,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_m(
@@ -562,7 +562,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_m(
@@ -571,7 +571,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m(
@@ -580,7 +580,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m(
@@ -589,7 +589,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m(
@@ -598,7 +598,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m(
@@ -607,7 +607,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m(
@@ -616,7 +616,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m(
@@ -625,7 +625,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m(
@@ -634,7 +634,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m(
@@ -643,7 +643,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m(
@@ -652,7 +652,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m(
@@ -661,7 +661,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m(
@@ -670,7 +670,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m(
@@ -679,7 +679,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m(
@@ -688,7 +688,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m(
@@ -697,7 +697,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m(
@@ -706,7 +706,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m(
@@ -715,7 +715,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m(
@@ -724,7 +724,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m(
@@ -733,7 +733,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m(
@@ -742,7 +742,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m(
@@ -751,7 +751,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m(
@@ -760,7 +760,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m(
@@ -769,7 +769,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m(
@@ -778,7 +778,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m(
@@ -787,7 +787,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m(
@@ -796,7 +796,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m(
@@ -805,7 +805,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m(
@@ -814,7 +814,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m(
@@ -823,7 +823,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m(
@@ -832,7 +832,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m(
@@ -841,7 +841,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m(
@@ -850,7 +850,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m(
@@ -859,7 +859,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m(
@@ -868,7 +868,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m(
@@ -877,7 +877,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m(
@@ -886,7 +886,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m(
@@ -895,7 +895,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m(
@@ -904,7 +904,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m(
@@ -913,7 +913,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m(
@@ -922,7 +922,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m(
@@ -931,7 +931,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m(
@@ -940,7 +940,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m(
@@ -949,7 +949,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m(
@@ -958,7 +958,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m(
@@ -967,7 +967,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m(
@@ -976,7 +976,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m(
@@ -985,7 +985,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m(
@@ -994,7 +994,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m(
@@ -1003,7 +1003,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m(
@@ -1012,7 +1012,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m(
@@ -1021,7 +1021,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m(
@@ -1030,6 +1030,6 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, op1, op2, vl);
+ return __riscv_vrgatherei16(mask, op1, op2, vl);
}
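The vrgatherei16 renames above are mechanical: every overloaded call keeps its argument list and only gains the __riscv_ prefix. A minimal caller-side sketch of the migration (not part of the patch; the wrapper name gather_masked is hypothetical, the intrinsic signature is taken from the masked i32m1 test above, and a toolchain whose <riscv_vector.h> exposes the prefixed overloads is assumed):

#include <riscv_vector.h>

// Masked gather with 16-bit indices, spelled with the new prefixed overload.
vint32m1_t gather_masked(vbool32_t mask, vint32m1_t src, vuint16mf2_t idx, size_t vl) {
  // Previously: return vrgatherei16(mask, src, idx, vl);
  return __riscv_vrgatherei16(mask, src, idx, vl);
}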
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrsub.c
index eab37f55d7e8..513bbf105fe0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrsub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
@@ -21,7 +21,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
@@ -30,7 +30,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
@@ -39,7 +39,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
@@ -48,7 +48,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
@@ -57,7 +57,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
@@ -66,7 +66,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
@@ -75,7 +75,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
@@ -84,7 +84,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
@@ -93,7 +93,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
@@ -102,7 +102,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
@@ -111,7 +111,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
@@ -120,7 +120,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
@@ -129,7 +129,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
@@ -138,7 +138,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
@@ -147,7 +147,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
@@ -156,7 +156,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
@@ -165,7 +165,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
@@ -174,7 +174,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
@@ -183,7 +183,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
@@ -192,7 +192,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
@@ -201,7 +201,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
@@ -210,7 +210,7 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
@@ -219,7 +219,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
@@ -228,7 +228,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
@@ -237,7 +237,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
@@ -246,7 +246,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
@@ -255,7 +255,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
@@ -264,7 +264,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
@@ -273,7 +273,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
@@ -282,7 +282,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
@@ -291,7 +291,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
@@ -300,7 +300,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
@@ -309,7 +309,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
@@ -318,7 +318,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
@@ -327,7 +327,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
@@ -336,7 +336,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
@@ -345,7 +345,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
@@ -354,7 +354,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
@@ -363,7 +363,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
@@ -372,7 +372,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
@@ -381,7 +381,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
@@ -390,7 +390,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vrsub(op1, op2, vl);
+ return __riscv_vrsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m(
@@ -426,7 +426,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m(
@@ -435,7 +435,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m(
@@ -444,7 +444,7 @@ vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m(
@@ -453,7 +453,7 @@ vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m(
@@ -462,7 +462,7 @@ vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m(
@@ -471,7 +471,7 @@ vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m(
@@ -480,7 +480,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m(
@@ -489,7 +489,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m(
@@ -498,7 +498,7 @@ vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m(
@@ -507,7 +507,7 @@ vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m(
@@ -516,7 +516,7 @@ vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m(
@@ -525,7 +525,7 @@ vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m(
@@ -534,7 +534,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m(
@@ -543,7 +543,7 @@ vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m(
@@ -552,7 +552,7 @@ vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m(
@@ -561,7 +561,7 @@ vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m(
@@ -570,7 +570,7 @@ vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m(
@@ -579,7 +579,7 @@ vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m(
@@ -588,7 +588,7 @@ vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m(
@@ -597,7 +597,7 @@ vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m(
@@ -606,7 +606,7 @@ vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m(
@@ -615,7 +615,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m(
@@ -624,7 +624,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m(
@@ -633,7 +633,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m(
@@ -642,7 +642,7 @@ vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m(
@@ -651,7 +651,7 @@ vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m(
@@ -660,7 +660,7 @@ vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m(
@@ -669,7 +669,7 @@ vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m(
@@ -696,7 +696,7 @@ vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m(
@@ -705,7 +705,7 @@ vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m(
@@ -714,7 +714,7 @@ vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m(
@@ -723,7 +723,7 @@ vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m(
@@ -732,7 +732,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m(
@@ -741,7 +741,7 @@ vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m(
@@ -750,7 +750,7 @@ vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m(
@@ -759,7 +759,7 @@ vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m(
@@ -768,7 +768,7 @@ vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m(
@@ -777,7 +777,7 @@ vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m(
@@ -786,7 +786,7 @@ vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vrsub(mask, op1, op2, vl);
+ return __riscv_vrsub(mask, op1, op2, vl);
}
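The vrsub hunks follow the same pattern for both the unmasked and masked overloads; overload resolution still selects the .vx form from the scalar second operand, so call sites change in name only. A hedged usage sketch (the helper name reverse_sub is hypothetical; the signature mirrors test_vrsub_vx_u32m1 above):

#include <riscv_vector.h>

// Reverse subtraction: each active element becomes x - v[i].
vuint32m1_t reverse_sub(vuint32m1_t v, uint32_t x, size_t vl) {
  return __riscv_vrsub(v, x, vl);
}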
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c
index c81a6921bb15..628997191aa9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vsadd(op1, op2, vl);
+ return __riscv_vsadd(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsadd(mask, op1, op2, vl);
+ return __riscv_vsadd(mask, op1, op2, vl);
}
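
(Illustrative note, not part of the patch.) The vsadd.c hunks above are mechanical: only the spelling of the overloaded call changes, while the argument lists are untouched; unmasked calls stay (op1, op2, vl) and masked calls stay (mask, op1, op2, vl). A minimal sketch of the new spelling, assuming a compiler whose <riscv_vector.h> already ships the prefixed overloads:

#include <riscv_vector.h>

// Unmasked saturating add: processes vl elements of two i32m1 vectors.
vint32m1_t sat_add(vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vsadd(a, b, vl);      // previously: vsadd(a, b, vl)
}

// Masked form: the mask comes first, matching the test signatures above.
vint32m1_t sat_add_m(vbool32_t m, vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vsadd(m, a, b, vl);   // previously: vsadd(m, a, b, vl)
}
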
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c
index 1ddbe71de213..bd5d6526aa42 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsaddu(op1, op2, vl);
+ return __riscv_vsaddu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsaddu(mask, op1, op2, vl);
+ return __riscv_vsaddu(mask, op1, op2, vl);
}
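
(Illustrative note, not part of the patch.) As the vsaddu.c hunks show, a single overloaded name serves every SEW/LMUL combination; overload resolution on the operand types selects the concrete intrinsic. A sketch under the same assumption about <riscv_vector.h>, using signatures taken from the tests above:

#include <riscv_vector.h>

// Vector-scalar form at the smallest fractional LMUL (u8, LMUL=1/8).
vuint8mf8_t add_u8(vuint8mf8_t a, uint8_t b, size_t vl) {
  return __riscv_vsaddu(a, b, vl);
}

// Vector-vector form at the largest LMUL (u64, LMUL=8): same name resolves.
vuint64m8_t add_u64(vuint64m8_t a, vuint64m8_t b, size_t vl) {
  return __riscv_vsaddu(a, b, vl);
}
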
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsbc.c
index 9a1a8968268f..3bdf6207ab2d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsbc.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t bor
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t bor
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t bor
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8(
@@ -408,7 +408,7 @@ vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borr
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borr
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borr
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t borr
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t borr
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borr
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borro
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borro
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borro
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t borr
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borr
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borr
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borro
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borro
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borr
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borr
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borr
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t borrowi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8(
@@ -795,6 +795,6 @@ vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borro
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc(op1, op2, borrowin, vl);
+ return __riscv_vsbc(op1, op2, borrowin, vl);
}
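// --- Usage sketch (illustrative, not autogenerated): how calling code reads
// after the rename. Only __riscv_vsbc, the vector types, and <riscv_vector.h>
// come from the patch; the helper name sub_with_borrow is hypothetical.
#include <riscv_vector.h>
#include <stddef.h>

// Overload resolution picks the vvm variant from the vint32m1_t operands;
// borrowin supplies the per-element borrow bit, vl the active element count.
static inline vint32m1_t sub_with_borrow(vint32m1_t a, vint32m1_t b,
                                         vbool32_t borrowin, size_t vl) {
  return __riscv_vsbc(a, b, borrowin, vl);
}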
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse16.c
index 2181afd5a8c8..f87d6c04ec60 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16mf4(_Float16 *base, vfloat16mf4_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vse16_v_f16mf4(_Float16 *base, vfloat16mf4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16mf2(_Float16 *base, vfloat16mf2_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vse16_v_f16mf2(_Float16 *base, vfloat16mf2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m2(_Float16 *base, vfloat16m2_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16m4(
@@ -49,7 +49,7 @@ void test_vse16_v_f16m2(_Float16 *base, vfloat16m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16m8(
@@ -58,7 +58,7 @@ void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16mf4(
@@ -67,7 +67,7 @@ void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16mf2(
@@ -76,7 +76,7 @@ void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16m1(
@@ -85,7 +85,7 @@ void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16m2(
@@ -94,7 +94,7 @@ void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16m4(
@@ -103,7 +103,7 @@ void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16m8(
@@ -112,7 +112,7 @@ void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16mf4(
@@ -121,7 +121,7 @@ void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16mf2(
@@ -130,7 +130,7 @@ void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16m1(
@@ -139,7 +139,7 @@ void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16m2(
@@ -148,7 +148,7 @@ void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16m4(
@@ -157,7 +157,7 @@ void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16m8(
@@ -166,7 +166,7 @@ void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) {
- return vse16(base, value, vl);
+ return __riscv_vse16(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16mf4_m(
@@ -175,7 +175,7 @@ void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16mf2_m(
@@ -184,7 +184,7 @@ void test_vse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16m1_m(
@@ -193,7 +193,7 @@ void test_vse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16m2_m(
@@ -202,7 +202,7 @@ void test_vse16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16m4_m(
@@ -211,7 +211,7 @@ void test_vse16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_f16m8_m(
@@ -220,7 +220,7 @@ void test_vse16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_f16m8_m(vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16mf4_m(
@@ -229,7 +229,7 @@ void test_vse16_v_f16m8_m(vbool2_t mask, _Float16 *base, vfloat16m8_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16mf2_m(
@@ -238,7 +238,7 @@ void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16m1_m(
@@ -247,7 +247,7 @@ void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16m2_m(
@@ -256,7 +256,7 @@ void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16m4_m(
@@ -265,7 +265,7 @@ void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_i16m8_m(
@@ -274,7 +274,7 @@ void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16mf4_m(
@@ -283,7 +283,7 @@ void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16mf2_m(
@@ -292,7 +292,7 @@ void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16m1_m(
@@ -301,7 +301,7 @@ void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16m2_m(
@@ -310,7 +310,7 @@ void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16m4_m(
@@ -319,7 +319,7 @@ void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse16_v_u16m8_m(
@@ -328,6 +328,6 @@ void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size
// CHECK-RV64-NEXT: ret void
//
void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) {
- return vse16(mask, base, value, vl);
+ return __riscv_vse16(mask, base, value, vl);
}
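// --- Usage sketch (illustrative): the renamed overloaded store in its unmasked
// and masked forms, mirroring the tests above. The helper names are
// hypothetical; the masked overload simply takes the vbool mask as its leading
// argument, as exercised by the _m tests.
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

static inline void store_i16(int16_t *base, vint16m1_t value, size_t vl) {
  __riscv_vse16(base, value, vl);        // writes vl 16-bit elements to base
}

static inline void store_i16_masked(vbool16_t mask, int16_t *base,
                                    vint16m1_t value, size_t vl) {
  __riscv_vse16(mask, base, value, vl);  // only mask-active elements are stored
}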
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse32.c
index 0689962f3d0f..acdfbe84198a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_f32m2(
@@ -31,7 +31,7 @@ void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_f32m4(
@@ -40,7 +40,7 @@ void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_f32m8(
@@ -49,7 +49,7 @@ void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32mf2(
@@ -58,7 +58,7 @@ void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32m1(
@@ -67,7 +67,7 @@ void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32m2(
@@ -76,7 +76,7 @@ void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32m4(
@@ -85,7 +85,7 @@ void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32m8(
@@ -94,7 +94,7 @@ void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32mf2(
@@ -103,7 +103,7 @@ void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32m1(
@@ -112,7 +112,7 @@ void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32m2(
@@ -121,7 +121,7 @@ void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32m4(
@@ -130,7 +130,7 @@ void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32m8(
@@ -139,7 +139,7 @@ void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) {
- return vse32(base, value, vl);
+ return __riscv_vse32(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m(
@@ -148,7 +148,7 @@ void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_f32m1_m(
@@ -157,7 +157,7 @@ void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_f32m2_m(
@@ -166,7 +166,7 @@ void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_f32m4_m(
@@ -175,7 +175,7 @@ void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_f32m8_m(
@@ -184,7 +184,7 @@ void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32mf2_m(
@@ -193,7 +193,7 @@ void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32m1_m(
@@ -202,7 +202,7 @@ void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32m2_m(
@@ -211,7 +211,7 @@ void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32m4_m(
@@ -220,7 +220,7 @@ void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_i32m8_m(
@@ -229,7 +229,7 @@ void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32mf2_m(
@@ -238,7 +238,7 @@ void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32m1_m(
@@ -247,7 +247,7 @@ void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32m2_m(
@@ -256,7 +256,7 @@ void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32m4_m(
@@ -265,7 +265,7 @@ void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse32_v_u32m8_m(
@@ -274,6 +274,6 @@ void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size
// CHECK-RV64-NEXT: ret void
//
void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) {
- return vse32(mask, base, value, vl);
+ return __riscv_vse32(mask, base, value, vl);
}
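// --- End-to-end sketch (illustrative, with assumptions): a strip-mined loop
// pairing the renamed overloaded store with non-overloaded intrinsics
// (__riscv_vsetvl_e32m1, __riscv_vle32_v_f32m1, __riscv_vfmul_vf_f32m1);
// those names are assumed from the companion commits of this patch set and do
// not appear in this diff. Build with the V extension, e.g. -march=rv64gcv.
#include <riscv_vector.h>
#include <stddef.h>

void scale_f32(float *data, size_t n, float factor) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);              // elements this pass
    vfloat32m1_t v = __riscv_vle32_v_f32m1(data + i, vl); // unmasked load (no overload)
    v = __riscv_vfmul_vf_f32m1(v, factor, vl);
    __riscv_vse32(data + i, v, vl);                       // overloaded store
    i += vl;
  }
}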
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse64.c
index c0a0d0b11562..b09e12c8c894 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_f64m2(
@@ -22,7 +22,7 @@ void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_f64m4(
@@ -31,7 +31,7 @@ void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_f64m8(
@@ -40,7 +40,7 @@ void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_i64m1(
@@ -49,7 +49,7 @@ void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_i64m2(
@@ -58,7 +58,7 @@ void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_i64m4(
@@ -67,7 +67,7 @@ void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_i64m8(
@@ -76,7 +76,7 @@ void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_u64m1(
@@ -85,7 +85,7 @@ void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_u64m2(
@@ -94,7 +94,7 @@ void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_u64m4(
@@ -103,7 +103,7 @@ void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_u64m8(
@@ -112,7 +112,7 @@ void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) {
- return vse64(base, value, vl);
+ return __riscv_vse64(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_f64m1_m(
@@ -121,7 +121,7 @@ void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_f64m2_m(
@@ -130,7 +130,7 @@ void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_f64m4_m(
@@ -139,7 +139,7 @@ void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_f64m8_m(
@@ -148,7 +148,7 @@ void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_i64m1_m(
@@ -157,7 +157,7 @@ void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_i64m2_m(
@@ -166,7 +166,7 @@ void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_i64m4_m(
@@ -175,7 +175,7 @@ void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_i64m8_m(
@@ -184,7 +184,7 @@ void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_u64m1_m(
@@ -193,7 +193,7 @@ void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_u64m2_m(
@@ -202,7 +202,7 @@ void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_u64m4_m(
@@ -211,7 +211,7 @@ void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse64_v_u64m8_m(
@@ -220,6 +220,6 @@ void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) {
- return vse64(mask, base, value, vl);
+ return __riscv_vse64(mask, base, value, vl);
}
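
(Illustrative sketch, not part of this patch: with the rename, a typical strip-mined loop spells the overloaded store __riscv_vse64, while intrinsics with no vector operand to dispatch on, such as vsetvl and the plain loads, keep their fully typed names. Assumes a toolchain with RVV intrinsic support, e.g. -march=rv64gcv.)

#include <riscv_vector.h>

// Hypothetical example, not from the patch: double each element of src into dst.
void vse64_demo(double *dst, const double *src, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e64m1(n - i);              // elements handled this pass
    vfloat64m1_t v = __riscv_vle64_v_f64m1(src + i, vl);  // loads stay non-overloaded
    v = __riscv_vfmul(v, 2.0, vl);                        // overloaded multiply
    __riscv_vse64(dst + i, v, vl);                        // overloaded store, new prefix
    i += vl;
  }
}
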
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse8.c
index b83c6a0e2578..8059698807f5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vse8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8m2(
@@ -48,7 +48,7 @@ void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8m4(
@@ -57,7 +57,7 @@ void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8m8(
@@ -66,7 +66,7 @@ void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8mf8(
@@ -75,7 +75,7 @@ void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8mf4(
@@ -84,7 +84,7 @@ void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8mf2(
@@ -93,7 +93,7 @@ void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8m1(
@@ -102,7 +102,7 @@ void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8m2(
@@ -111,7 +111,7 @@ void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8m4(
@@ -120,7 +120,7 @@ void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8m8(
@@ -129,7 +129,7 @@ void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) {
- return vse8(base, value, vl);
+ return __riscv_vse8(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8mf8_m(
@@ -138,7 +138,7 @@ void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8mf4_m(
@@ -147,7 +147,7 @@ void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8mf2_m(
@@ -156,7 +156,7 @@ void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8m1_m(
@@ -165,7 +165,7 @@ void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8m2_m(
@@ -174,7 +174,7 @@ void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl)
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8m4_m(
@@ -183,7 +183,7 @@ void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl)
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_i8m8_m(
@@ -192,7 +192,7 @@ void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl)
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8mf8_m(
@@ -201,7 +201,7 @@ void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl)
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8mf4_m(
@@ -210,7 +210,7 @@ void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8mf2_m(
@@ -219,7 +219,7 @@ void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8m1_m(
@@ -228,7 +228,7 @@ void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8m2_m(
@@ -237,7 +237,7 @@ void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t v
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8m4_m(
@@ -246,7 +246,7 @@ void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t v
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
// CHECK-RV64-LABEL: @test_vse8_v_u8m8_m(
@@ -255,6 +255,6 @@ void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t v
// CHECK-RV64-NEXT: ret void
//
void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) {
- return vse8(mask, base, value, vl);
+ return __riscv_vse8(mask, base, value, vl);
}
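
(A minimal sketch of the masked form, again hypothetical and not part of this patch: the overloaded masked store resolves on the mask and value operand types, here vbool8_t plus vint8m1_t, with the mask passed first exactly as in the _m tests above.)

#include <riscv_vector.h>

// Hypothetical example: store only the strictly positive lanes of v.
void vse8_masked_demo(int8_t *dst, vint8m1_t v, size_t vl) {
  vbool8_t m = __riscv_vmsgt(v, 0, vl);  // mask of lanes with v > 0
  __riscv_vse8(m, dst, v, vl);           // masked overloaded store
}
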
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vset.c
index 2efddc582be2..537c4efc8239 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vset.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vset.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, size_t index, vfloat16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m4(
@@ -22,7 +22,7 @@ vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, size_t index, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, size_t index, vfloat16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m4(
@@ -31,7 +31,7 @@ vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, size_t index, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, size_t index, vfloat16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m8(
@@ -40,7 +40,7 @@ vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, size_t index, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, size_t index, vfloat16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m8(
@@ -49,7 +49,7 @@ vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, size_t index, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, size_t index, vfloat16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f16m4_f16m8(
@@ -58,7 +58,7 @@ vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, size_t index, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, size_t index, vfloat16m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, size_t index, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
@@ -76,7 +76,7 @@ vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
@@ -94,7 +94,7 @@ vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
@@ -121,7 +121,7 @@ vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
@@ -157,7 +157,7 @@ vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
@@ -184,7 +184,7 @@ vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
@@ -193,7 +193,7 @@ vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
@@ -202,7 +202,7 @@ vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
@@ -211,7 +211,7 @@ vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
@@ -220,7 +220,7 @@ vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
@@ -229,7 +229,7 @@ vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4(
@@ -238,7 +238,7 @@ vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4(
@@ -247,7 +247,7 @@ vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8(
@@ -256,7 +256,7 @@ vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8(
@@ -265,7 +265,7 @@ vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8(
@@ -274,7 +274,7 @@ vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2(
@@ -283,7 +283,7 @@ vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4(
@@ -292,7 +292,7 @@ vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4(
@@ -301,7 +301,7 @@ vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8(
@@ -310,7 +310,7 @@ vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8(
@@ -319,7 +319,7 @@ vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8(
@@ -328,7 +328,7 @@ vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2(
@@ -337,7 +337,7 @@ vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4(
@@ -346,7 +346,7 @@ vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4(
@@ -355,7 +355,7 @@ vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8(
@@ -364,7 +364,7 @@ vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8(
@@ -373,7 +373,7 @@ vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8(
@@ -382,7 +382,7 @@ vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2(
@@ -391,7 +391,7 @@ vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4(
@@ -400,7 +400,7 @@ vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4(
@@ -409,7 +409,7 @@ vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8(
@@ -418,7 +418,7 @@ vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8(
@@ -427,7 +427,7 @@ vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8(
@@ -436,7 +436,7 @@ vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2(
@@ -445,7 +445,7 @@ vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4(
@@ -454,7 +454,7 @@ vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4(
@@ -463,7 +463,7 @@ vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8(
@@ -472,7 +472,7 @@ vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8(
@@ -481,7 +481,7 @@ vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8(
@@ -490,7 +490,7 @@ vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2(
@@ -499,7 +499,7 @@ vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4(
@@ -508,7 +508,7 @@ vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4(
@@ -517,7 +517,7 @@ vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8(
@@ -526,7 +526,7 @@ vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8(
@@ -535,7 +535,7 @@ vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8(
@@ -544,7 +544,7 @@ vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
@@ -553,7 +553,7 @@ vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
@@ -562,7 +562,7 @@ vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
@@ -571,7 +571,7 @@ vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
@@ -580,7 +580,7 @@ vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
@@ -589,7 +589,7 @@ vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
@@ -598,6 +598,6 @@ vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
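
(Hypothetical sketch of what these vset tests exercise: __riscv_vset inserts an LMUL=1 part into a wider register group, with the destination type selecting the overload and the constant index selecting the slot. The tests above always pass index 0; a caller can populate every slot.)

#include <riscv_vector.h>

// Hypothetical example: build an m2 group from two m1 halves.
vint32m2_t vset_demo(vint32m2_t dest, vint32m1_t lo, vint32m1_t hi) {
  dest = __riscv_vset(dest, 0, lo);  // slot 0: low m1 half
  dest = __riscv_vset(dest, 1, hi);  // slot 1: high m1 half
  return dest;
}
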
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c
index 83fee4ed72b4..e217c6e45097 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
@@ -21,7 +21,7 @@ vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
@@ -30,7 +30,7 @@ vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
@@ -39,7 +39,7 @@ vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
@@ -48,7 +48,7 @@ vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
@@ -57,7 +57,7 @@ vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
@@ -66,7 +66,7 @@ vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
- return vsext_vf4(op1, vl);
+ return __riscv_vsext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
@@ -75,7 +75,7 @@ vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
- return vsext_vf4(op1, vl);
+ return __riscv_vsext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
@@ -84,7 +84,7 @@ vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
- return vsext_vf4(op1, vl);
+ return __riscv_vsext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
@@ -93,7 +93,7 @@ vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
- return vsext_vf4(op1, vl);
+ return __riscv_vsext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
@@ -102,7 +102,7 @@ vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
- return vsext_vf4(op1, vl);
+ return __riscv_vsext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
@@ -111,7 +111,7 @@ vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
- return vsext_vf8(op1, vl);
+ return __riscv_vsext_vf8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
@@ -120,7 +120,7 @@ vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
- return vsext_vf8(op1, vl);
+ return __riscv_vsext_vf8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
@@ -129,7 +129,7 @@ vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
- return vsext_vf8(op1, vl);
+ return __riscv_vsext_vf8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
@@ -138,7 +138,7 @@ vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
- return vsext_vf8(op1, vl);
+ return __riscv_vsext_vf8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
@@ -147,7 +147,7 @@ vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
@@ -156,7 +156,7 @@ vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
@@ -165,7 +165,7 @@ vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
@@ -174,7 +174,7 @@ vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
@@ -183,7 +183,7 @@ vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
@@ -192,7 +192,7 @@ vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
- return vsext_vf4(op1, vl);
+ return __riscv_vsext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
@@ -201,7 +201,7 @@ vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
- return vsext_vf4(op1, vl);
+ return __riscv_vsext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
@@ -210,7 +210,7 @@ vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
- return vsext_vf4(op1, vl);
+ return __riscv_vsext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
@@ -219,7 +219,7 @@ vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
- return vsext_vf4(op1, vl);
+ return __riscv_vsext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
@@ -228,7 +228,7 @@ vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
@@ -237,7 +237,7 @@ vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
@@ -246,7 +246,7 @@ vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
@@ -255,7 +255,7 @@ vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
- return vsext_vf2(op1, vl);
+ return __riscv_vsext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
@@ -264,7 +264,7 @@ vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
@@ -273,7 +273,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m(
@@ -282,7 +282,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m(
@@ -291,7 +291,7 @@ vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m(
@@ -300,7 +300,7 @@ vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m(
@@ -309,7 +309,7 @@ vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint8m4_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m(
@@ -318,7 +318,7 @@ vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
- return vsext_vf4(mask, op1, vl);
+ return __riscv_vsext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m(
@@ -327,7 +327,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
- return vsext_vf4(mask, op1, vl);
+ return __riscv_vsext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m(
@@ -336,7 +336,7 @@ vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
- return vsext_vf4(mask, op1, vl);
+ return __riscv_vsext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m(
@@ -345,7 +345,7 @@ vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
- return vsext_vf4(mask, op1, vl);
+ return __riscv_vsext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m(
@@ -354,7 +354,7 @@ vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
- return vsext_vf4(mask, op1, vl);
+ return __riscv_vsext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m(
@@ -363,7 +363,7 @@ vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
- return vsext_vf8(mask, op1, vl);
+ return __riscv_vsext_vf8(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m(
@@ -372,7 +372,7 @@ vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
- return vsext_vf8(mask, op1, vl);
+ return __riscv_vsext_vf8(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m(
@@ -381,7 +381,7 @@ vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
- return vsext_vf8(mask, op1, vl);
+ return __riscv_vsext_vf8(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m(
@@ -390,7 +390,7 @@ vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
- return vsext_vf8(mask, op1, vl);
+ return __riscv_vsext_vf8(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m(
@@ -399,7 +399,7 @@ vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m(
@@ -408,7 +408,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint16mf4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m(
@@ -417,7 +417,7 @@ vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m(
@@ -426,7 +426,7 @@ vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m(
@@ -435,7 +435,7 @@ vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint16m4_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m(
@@ -444,7 +444,7 @@ vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
- return vsext_vf4(mask, op1, vl);
+ return __riscv_vsext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m(
@@ -453,7 +453,7 @@ vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
- return vsext_vf4(mask, op1, vl);
+ return __riscv_vsext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m(
@@ -462,7 +462,7 @@ vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
- return vsext_vf4(mask, op1, vl);
+ return __riscv_vsext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m(
@@ -471,7 +471,7 @@ vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
- return vsext_vf4(mask, op1, vl);
+ return __riscv_vsext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m(
@@ -480,7 +480,7 @@ vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint32mf2_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m(
@@ -489,7 +489,7 @@ vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint32m1_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m(
@@ -498,7 +498,7 @@ vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint32m2_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m(
@@ -507,6 +507,6 @@ vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint32m4_t op1, size_t vl) {
- return vsext_vf2(mask, op1, vl);
+ return __riscv_vsext_vf2(mask, op1, vl);
}
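
(One more hypothetical sketch: the _vf2/_vf4/_vf8 suffixes on vsext name the widening factor and are unchanged by this patch; only the __riscv_ prefix is new. Widening i8 to i32 quadruples the element width, so LMUL grows in step, m1 to m4.)

#include <riscv_vector.h>

// Hypothetical example: sign-extend signed bytes to 32-bit lanes.
vint32m4_t vsext_demo(vint8m1_t v, size_t vl) {
  return __riscv_vsext_vf4(v, vl);  // i8 -> i32 (factor 4), LMUL m1 -> m4
}
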
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1down.c
index 681c5cbd1dc9..0f73725e1eb1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1down.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
@@ -21,7 +21,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
@@ -30,7 +30,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
@@ -39,7 +39,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
@@ -48,7 +48,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
@@ -57,7 +57,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
@@ -66,7 +66,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
@@ -75,7 +75,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
@@ -84,7 +84,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
@@ -93,7 +93,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
@@ -102,7 +102,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
@@ -111,7 +111,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8(
@@ -120,7 +120,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2(
@@ -129,7 +129,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1(
@@ -138,7 +138,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2(
@@ -147,7 +147,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4(
@@ -156,7 +156,7 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8(
@@ -165,7 +165,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1(
@@ -174,7 +174,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2(
@@ -183,7 +183,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4(
@@ -192,7 +192,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8(
@@ -201,7 +201,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8(
@@ -210,7 +210,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4(
@@ -219,7 +219,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2(
@@ -228,7 +228,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1(
@@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2(
@@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4(
@@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8(
@@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4(
@@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2(
@@ -282,7 +282,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1(
@@ -291,7 +291,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2(
@@ -300,7 +300,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4(
@@ -309,7 +309,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8(
@@ -318,7 +318,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2(
@@ -327,7 +327,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1(
@@ -336,7 +336,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2(
@@ -345,7 +345,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4(
@@ -354,7 +354,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
@@ -363,7 +363,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
@@ -372,7 +372,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
@@ -381,7 +381,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
@@ -390,7 +390,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1down(src, value, vl);
+ return __riscv_vslide1down(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t va
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m(
@@ -426,7 +426,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t va
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m(
@@ -435,7 +435,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t va
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m(
@@ -444,7 +444,7 @@ vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m(
@@ -453,7 +453,7 @@ vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m(
@@ -462,7 +462,7 @@ vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m(
@@ -471,7 +471,7 @@ vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m(
@@ -480,7 +480,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m(
@@ -489,7 +489,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m(
@@ -498,7 +498,7 @@ vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m(
@@ -507,7 +507,7 @@ vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t va
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m(
@@ -516,7 +516,7 @@ vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t va
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m(
@@ -525,7 +525,7 @@ vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t va
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m(
@@ -534,7 +534,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m(
@@ -543,7 +543,7 @@ vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m(
@@ -552,7 +552,7 @@ vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m(
@@ -561,7 +561,7 @@ vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t va
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m(
@@ -570,7 +570,7 @@ vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t va
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m(
@@ -579,7 +579,7 @@ vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m(
@@ -588,7 +588,7 @@ vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m(
@@ -597,7 +597,7 @@ vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m(
@@ -606,7 +606,7 @@ vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t va
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m(
@@ -615,7 +615,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m(
@@ -624,7 +624,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m(
@@ -633,7 +633,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m(
@@ -642,7 +642,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m(
@@ -651,7 +651,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t val
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m(
@@ -660,7 +660,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t val
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m(
@@ -669,7 +669,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t val
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m(
@@ -696,7 +696,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m(
@@ -705,7 +705,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m(
@@ -714,7 +714,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m(
@@ -723,7 +723,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m(
@@ -732,7 +732,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m(
@@ -741,7 +741,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m(
@@ -750,7 +750,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m(
@@ -759,7 +759,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m(
@@ -768,7 +768,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m(
@@ -777,7 +777,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m(
@@ -786,7 +786,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1down(mask, src, value, vl);
+ return __riscv_vslide1down(mask, src, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1up.c
index 28875a552383..e89400f840e1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1up.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
@@ -21,7 +21,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
@@ -30,7 +30,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
@@ -39,7 +39,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
@@ -48,7 +48,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
@@ -57,7 +57,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
@@ -66,7 +66,7 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
@@ -75,7 +75,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
@@ -84,7 +84,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
@@ -93,7 +93,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
@@ -102,7 +102,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
@@ -111,7 +111,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
@@ -120,7 +120,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
@@ -129,7 +129,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
@@ -138,7 +138,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
@@ -147,7 +147,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
@@ -156,7 +156,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
@@ -165,7 +165,7 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
@@ -174,7 +174,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
@@ -183,7 +183,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
@@ -192,7 +192,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
@@ -201,7 +201,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
@@ -210,7 +210,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
@@ -219,7 +219,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
@@ -228,7 +228,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
@@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
@@ -246,7 +246,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
@@ -255,7 +255,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
@@ -264,7 +264,7 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
@@ -273,7 +273,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
@@ -282,7 +282,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
@@ -291,7 +291,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
@@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
@@ -309,7 +309,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
@@ -318,7 +318,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
@@ -327,7 +327,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
@@ -336,7 +336,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
@@ -345,7 +345,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
@@ -354,7 +354,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
@@ -363,7 +363,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
@@ -372,7 +372,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
@@ -381,7 +381,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
@@ -390,7 +390,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1up(src, value, vl);
+ return __riscv_vslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t valu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m(
@@ -426,7 +426,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t valu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m(
@@ -435,7 +435,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t valu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m(
@@ -444,7 +444,7 @@ vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m(
@@ -453,7 +453,7 @@ vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m(
@@ -462,7 +462,7 @@ vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m(
@@ -471,7 +471,7 @@ vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m(
@@ -480,7 +480,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m(
@@ -489,7 +489,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m(
@@ -498,7 +498,7 @@ vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m(
@@ -507,7 +507,7 @@ vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t valu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m(
@@ -516,7 +516,7 @@ vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t valu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m(
@@ -525,7 +525,7 @@ vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t valu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m(
@@ -534,7 +534,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m(
@@ -543,7 +543,7 @@ vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t val
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m(
@@ -552,7 +552,7 @@ vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m(
@@ -561,7 +561,7 @@ vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t valu
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m(
@@ -570,7 +570,7 @@ vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t valu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m(
@@ -579,7 +579,7 @@ vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t val
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m(
@@ -588,7 +588,7 @@ vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t val
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m(
@@ -597,7 +597,7 @@ vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m(
@@ -606,7 +606,7 @@ vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t valu
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m(
@@ -615,7 +615,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m(
@@ -624,7 +624,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m(
@@ -633,7 +633,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m(
@@ -642,7 +642,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m(
@@ -651,7 +651,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m(
@@ -660,7 +660,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m(
@@ -669,7 +669,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m(
@@ -696,7 +696,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m(
@@ -705,7 +705,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m(
@@ -714,7 +714,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m(
@@ -723,7 +723,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m(
@@ -732,7 +732,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m(
@@ -741,7 +741,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m(
@@ -750,7 +750,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m(
@@ -759,7 +759,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m(
@@ -768,7 +768,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m(
@@ -777,7 +777,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m(
@@ -786,7 +786,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1up(mask, src, value, vl);
+ return __riscv_vslide1up(mask, src, value, vl);
}
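For reference, a minimal sketch of calling the renamed overloaded masked form — a hypothetical wrapper, not part of the patch; the types and argument order are taken verbatim from the updated tests above, and riscv_vector.h is assumed to declare the intrinsic:

#include <riscv_vector.h>

// Hypothetical helper: slide the i32m1 vector up by one element under
// `mask`, inserting `value` at element 0. Overload resolution picks the
// i32m1 variant from the operand types, exactly as in the tests above.
static inline vint32m1_t slide1up_i32m1(vbool32_t mask, vint32m1_t src,
                                        int32_t value, size_t vl) {
  return __riscv_vslide1up(mask, src, value, vl);
}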
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c
index 652014b01a4a..b79ed3230e39 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t offset, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
@@ -157,7 +157,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
@@ -166,7 +166,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
@@ -175,7 +175,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
@@ -184,7 +184,7 @@ vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
@@ -193,7 +193,7 @@ vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
@@ -202,7 +202,7 @@ vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
@@ -211,7 +211,7 @@ vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
@@ -220,7 +220,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
@@ -229,7 +229,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
@@ -238,7 +238,7 @@ vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
@@ -247,7 +247,7 @@ vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
@@ -256,7 +256,7 @@ vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
@@ -265,7 +265,7 @@ vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
@@ -274,7 +274,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
@@ -292,7 +292,7 @@ vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
@@ -301,7 +301,7 @@ vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
@@ -310,7 +310,7 @@ vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
@@ -319,7 +319,7 @@ vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
@@ -328,7 +328,7 @@ vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
@@ -337,7 +337,7 @@ vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
@@ -346,7 +346,7 @@ vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
@@ -382,7 +382,7 @@ vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
@@ -391,7 +391,7 @@ vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
@@ -400,7 +400,7 @@ vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
@@ -409,7 +409,7 @@ vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
@@ -436,7 +436,7 @@ vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
@@ -445,7 +445,7 @@ vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
@@ -454,7 +454,7 @@ vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
@@ -463,7 +463,7 @@ vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
@@ -481,7 +481,7 @@ vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
@@ -490,7 +490,7 @@ vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
@@ -499,7 +499,7 @@ vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
@@ -508,7 +508,7 @@ vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
@@ -517,7 +517,7 @@ vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
@@ -526,7 +526,7 @@ vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
@@ -535,7 +535,7 @@ vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) {
- return vslidedown(src, offset, vl);
+ return __riscv_vslidedown(src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m(
@@ -544,7 +544,7 @@ vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m(
@@ -553,7 +553,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m(
@@ -562,7 +562,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m(
@@ -571,7 +571,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m(
@@ -580,7 +580,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
@@ -598,7 +598,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
@@ -607,7 +607,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
@@ -616,7 +616,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
@@ -625,7 +625,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
@@ -634,7 +634,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
@@ -643,7 +643,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
@@ -652,7 +652,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
@@ -661,7 +661,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
@@ -670,7 +670,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
@@ -679,7 +679,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
@@ -688,7 +688,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
@@ -697,7 +697,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
@@ -706,7 +706,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
@@ -715,7 +715,7 @@ vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
@@ -724,7 +724,7 @@ vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
@@ -733,7 +733,7 @@ vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
@@ -742,7 +742,7 @@ vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
@@ -751,7 +751,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
@@ -760,7 +760,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
@@ -769,7 +769,7 @@ vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
@@ -778,7 +778,7 @@ vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
@@ -787,7 +787,7 @@ vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
@@ -796,7 +796,7 @@ vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
@@ -805,7 +805,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
@@ -814,7 +814,7 @@ vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
@@ -823,7 +823,7 @@ vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
@@ -832,7 +832,7 @@ vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
@@ -841,7 +841,7 @@ vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
@@ -850,7 +850,7 @@ vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
@@ -859,7 +859,7 @@ vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
@@ -868,7 +868,7 @@ vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
@@ -877,7 +877,7 @@ vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
@@ -886,7 +886,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, size_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
@@ -895,7 +895,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, size_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
@@ -904,7 +904,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, size_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
@@ -913,7 +913,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, size_t offse
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
@@ -922,7 +922,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, size_t offse
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
@@ -931,7 +931,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, size_t offse
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
@@ -940,7 +940,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, size_t offse
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
@@ -967,7 +967,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, size_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
@@ -976,7 +976,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, size_t of
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
@@ -985,7 +985,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, size_t of
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
@@ -994,7 +994,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, size_t of
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
@@ -1003,7 +1003,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
@@ -1012,7 +1012,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, size_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
@@ -1021,7 +1021,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, size_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
@@ -1030,7 +1030,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, size_t of
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
@@ -1039,7 +1039,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, size_t of
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
@@ -1048,7 +1048,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, size_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
@@ -1057,7 +1057,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, size_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
@@ -1066,6 +1066,6 @@ vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, size_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) {
- return vslidedown(mask, src, offset, vl);
+ return __riscv_vslidedown(mask, src, offset, vl);
}
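Likewise, a sketch of the unmasked overloaded form after the rename — again a hypothetical wrapper assuming riscv_vector.h, with the signature taken from the updated tests above:

#include <riscv_vector.h>

// Hypothetical helper: slide the u8m1 vector down by `offset` elements;
// the overload resolves on the type of `src`.
static inline vuint8m1_t slidedown_u8m1(vuint8m1_t src, size_t offset,
                                        size_t vl) {
  return __riscv_vslidedown(src, offset, vl);
}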
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c
index 3e55a02d0610..557d7503d4c1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m1(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m2(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m4(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m8(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
@@ -157,7 +157,7 @@ vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
@@ -166,7 +166,7 @@ vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
@@ -175,7 +175,7 @@ vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
@@ -184,7 +184,7 @@ vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
@@ -193,7 +193,7 @@ vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
@@ -202,7 +202,7 @@ vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
@@ -211,7 +211,7 @@ vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
@@ -220,7 +220,7 @@ vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t of
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
@@ -229,7 +229,7 @@ vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t of
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
@@ -238,7 +238,7 @@ vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
@@ -247,7 +247,7 @@ vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
@@ -256,7 +256,7 @@ vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
@@ -265,7 +265,7 @@ vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
@@ -274,7 +274,7 @@ vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t of
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
@@ -283,7 +283,7 @@ vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
@@ -292,7 +292,7 @@ vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
@@ -301,7 +301,7 @@ vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
@@ -310,7 +310,7 @@ vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
@@ -319,7 +319,7 @@ vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
@@ -328,7 +328,7 @@ vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
@@ -337,7 +337,7 @@ vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
@@ -346,7 +346,7 @@ vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
@@ -382,7 +382,7 @@ vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
@@ -391,7 +391,7 @@ vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
@@ -400,7 +400,7 @@ vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
@@ -409,7 +409,7 @@ vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
@@ -436,7 +436,7 @@ vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
@@ -445,7 +445,7 @@ vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
@@ -454,7 +454,7 @@ vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
@@ -463,7 +463,7 @@ vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
@@ -481,7 +481,7 @@ vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
@@ -490,7 +490,7 @@ vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
@@ -499,7 +499,7 @@ vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
@@ -508,7 +508,7 @@ vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
@@ -517,7 +517,7 @@ vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
@@ -526,7 +526,7 @@ vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
@@ -535,7 +535,7 @@ vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
- return vslideup(dest, src, offset, vl);
+ return __riscv_vslideup(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_m(
@@ -544,7 +544,7 @@ vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_m(
@@ -553,7 +553,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_m(
@@ -562,7 +562,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_m(
@@ -571,7 +571,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_m(
@@ -580,7 +580,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_m(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m(
@@ -598,7 +598,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m(
@@ -607,7 +607,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m(
@@ -616,7 +616,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m(
@@ -625,7 +625,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m(
@@ -634,7 +634,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m(
@@ -643,7 +643,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m(
@@ -652,7 +652,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m(
@@ -661,7 +661,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
@@ -670,7 +670,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
@@ -679,7 +679,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
@@ -688,7 +688,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
@@ -697,7 +697,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
@@ -706,7 +706,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m(
@@ -715,7 +715,7 @@ vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m(
@@ -724,7 +724,7 @@ vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m(
@@ -733,7 +733,7 @@ vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m(
@@ -742,7 +742,7 @@ vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m(
@@ -751,7 +751,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m(
@@ -760,7 +760,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m(
@@ -769,7 +769,7 @@ vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m(
@@ -778,7 +778,7 @@ vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m(
@@ -787,7 +787,7 @@ vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m(
@@ -796,7 +796,7 @@ vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m(
@@ -805,7 +805,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m(
@@ -814,7 +814,7 @@ vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m(
@@ -823,7 +823,7 @@ vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m(
@@ -832,7 +832,7 @@ vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m(
@@ -841,7 +841,7 @@ vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m(
@@ -850,7 +850,7 @@ vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m(
@@ -859,7 +859,7 @@ vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m(
@@ -868,7 +868,7 @@ vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m(
@@ -877,7 +877,7 @@ vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m(
@@ -886,7 +886,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m(
@@ -895,7 +895,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m(
@@ -904,7 +904,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m(
@@ -913,7 +913,7 @@ vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m(
@@ -922,7 +922,7 @@ vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t sr
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m(
@@ -931,7 +931,7 @@ vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t sr
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m(
@@ -940,7 +940,7 @@ vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t sr
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m(
@@ -967,7 +967,7 @@ vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m(
@@ -976,7 +976,7 @@ vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m(
@@ -985,7 +985,7 @@ vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m(
@@ -994,7 +994,7 @@ vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m(
@@ -1003,7 +1003,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m(
@@ -1012,7 +1012,7 @@ vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m(
@@ -1021,7 +1021,7 @@ vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m(
@@ -1030,7 +1030,7 @@ vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m(
@@ -1039,7 +1039,7 @@ vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m(
@@ -1048,7 +1048,7 @@ vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m(
@@ -1057,7 +1057,7 @@ vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m(
@@ -1066,6 +1066,6 @@ vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
- return vslideup(mask, dest, src, offset, vl);
+ return __riscv_vslideup(mask, dest, src, offset, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsll.c
index e6a048154516..28d175b0ba63 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsll.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
@@ -408,7 +408,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
@@ -795,7 +795,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
- return vsll(op1, shift, vl);
+ return __riscv_vsll(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
@@ -804,7 +804,7 @@ vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
@@ -813,7 +813,7 @@ vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shif
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
@@ -822,7 +822,7 @@ vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
@@ -831,7 +831,7 @@ vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m(
@@ -840,7 +840,7 @@ vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m(
@@ -849,7 +849,7 @@ vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m(
@@ -858,7 +858,7 @@ vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m(
@@ -867,7 +867,7 @@ vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m(
@@ -876,7 +876,7 @@ vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m(
@@ -885,7 +885,7 @@ vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m(
@@ -894,7 +894,7 @@ vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m(
@@ -903,7 +903,7 @@ vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m(
@@ -912,7 +912,7 @@ vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m(
@@ -921,7 +921,7 @@ vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m(
@@ -930,7 +930,7 @@ vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m(
@@ -939,7 +939,7 @@ vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m(
@@ -948,7 +948,7 @@ vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m(
@@ -957,7 +957,7 @@ vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m(
@@ -966,7 +966,7 @@ vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m(
@@ -975,7 +975,7 @@ vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m(
@@ -984,7 +984,7 @@ vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m(
@@ -993,7 +993,7 @@ vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shif
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsll(mask, op1, shift, vl);
+ return __riscv_vsll(mask, op1, shift, vl);
}
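The vsll.c hunks above are purely mechanical: every overloaded call site gains the __riscv_ prefix while the argument lists and overload resolution stay unchanged. As a hedged sketch of what a caller looks like after the rename (illustrative only, not taken from the patch; assumes a toolchain whose riscv_vector.h already ships the prefixed overloads and is built with the V extension, e.g. -march=rv64gcv):

#include <riscv_vector.h>
#include <stddef.h>

// Before this patch: return vsll(op1, shift, vl);
// After this patch: the same overload, now behind the __riscv_ prefix.
// vsll.vv form: per-element shift amounts come from a vector operand.
vint32m1_t shl_vv(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
  return __riscv_vsll(op1, shift, vl);
}

// vsll.vx form: one scalar shift amount applied to every element.
vint32m1_t shl_vx(vint32m1_t op1, size_t shift, size_t vl) {
  return __riscv_vsll(op1, shift, vl);
}

Note that the overload is resolved from the operand types alone, which is why the rename never has to touch anything but the function name in these tests.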
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm.c
index 9059ce7f9248..b9e1defd1cc9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsm_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
- return vsm(base, value, vl);
+ return __riscv_vsm(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vsm_v_b2(
@@ -21,7 +21,7 @@ void test_vsm_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vsm_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
- return vsm(base, value, vl);
+ return __riscv_vsm(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vsm_v_b4(
@@ -30,7 +30,7 @@ void test_vsm_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vsm_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
- return vsm(base, value, vl);
+ return __riscv_vsm(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vsm_v_b8(
@@ -39,7 +39,7 @@ void test_vsm_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vsm_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
- return vsm(base, value, vl);
+ return __riscv_vsm(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vsm_v_b16(
@@ -48,7 +48,7 @@ void test_vsm_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vsm_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
- return vsm(base, value, vl);
+ return __riscv_vsm(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vsm_v_b32(
@@ -57,7 +57,7 @@ void test_vsm_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vsm_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
- return vsm(base, value, vl);
+ return __riscv_vsm(base, value, vl);
}
// CHECK-RV64-LABEL: @test_vsm_v_b64(
@@ -66,6 +66,6 @@ void test_vsm_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vsm_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
- return vsm(base, value, vl);
+ return __riscv_vsm(base, value, vl);
}
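The vsm.c hunks follow the same one-line pattern; vsm has no element-width variants, so the overload resolves entirely from the vbool type of the value operand. A minimal caller sketch under the same assumptions as above (illustrative, not from the patch):

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// Store the first vl bits of a mask register to memory, one bit per
// element, packed into bytes at base. Mirrors test_vsm_v_b8 above.
void store_mask_b8(uint8_t *base, vbool8_t value, size_t vl) {
  __riscv_vsm(base, value, vl); // overloaded vsm.v
}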
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c
index e7208e402af2..126351b4a0c6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
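The same mechanical rename covers the fixed-point `vsmul` tests above, in both the plain and the mask-first overloads; overload resolution on the operand types is unchanged. A brief sketch mirroring the signatures these tests exercise (helper names are illustrative):

#include <riscv_vector.h>

// Vector-vector fractional multiply; the overload resolves on the
// element type and LMUL of the operands.
vint32m1_t smul_vv(vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vsmul(a, b, vl);        // was: vsmul(a, b, vl)
}

// Masked vector-scalar form: the mask is prepended as the first
// argument, matching the *_m tests above.
vint32m1_t smul_vx_m(vbool32_t m, vint32m1_t a, int32_t b, size_t vl) {
  return __riscv_vsmul(m, a, b, vl);     // was: vsmul(m, a, b, vl)
}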
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei16.c
index d7910d357546..15ff9bfd56b7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsoxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8(
@@ -58,7 +58,7 @@ void test_vsoxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2(
@@ -67,7 +67,7 @@ void test_vsoxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1(
@@ -76,7 +76,7 @@ void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2(
@@ -85,7 +85,7 @@ void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4(
@@ -94,7 +94,7 @@ void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8(
@@ -103,7 +103,7 @@ void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1(
@@ -112,7 +112,7 @@ void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2(
@@ -121,7 +121,7 @@ void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4(
@@ -130,7 +130,7 @@ void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8(
@@ -139,7 +139,7 @@ void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8(
@@ -148,7 +148,7 @@ void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4(
@@ -157,7 +157,7 @@ void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2(
@@ -166,7 +166,7 @@ void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1(
@@ -175,7 +175,7 @@ void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2(
@@ -184,7 +184,7 @@ void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4(
@@ -193,7 +193,7 @@ void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4(
@@ -202,7 +202,7 @@ void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2(
@@ -211,7 +211,7 @@ void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1(
@@ -220,7 +220,7 @@ void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2(
@@ -229,7 +229,7 @@ void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4(
@@ -238,7 +238,7 @@ void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8(
@@ -247,7 +247,7 @@ void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2(
@@ -256,7 +256,7 @@ void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1(
@@ -265,7 +265,7 @@ void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2(
@@ -274,7 +274,7 @@ void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4(
@@ -283,7 +283,7 @@ void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8(
@@ -292,7 +292,7 @@ void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1(
@@ -301,7 +301,7 @@ void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2(
@@ -310,7 +310,7 @@ void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4(
@@ -319,7 +319,7 @@ void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8(
@@ -328,7 +328,7 @@ void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8(
@@ -337,7 +337,7 @@ void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2(
@@ -355,7 +355,7 @@ void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1(
@@ -364,7 +364,7 @@ void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2(
@@ -373,7 +373,7 @@ void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4(
@@ -382,7 +382,7 @@ void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4(
@@ -391,7 +391,7 @@ void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2(
@@ -400,7 +400,7 @@ void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1(
@@ -409,7 +409,7 @@ void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2(
@@ -418,7 +418,7 @@ void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4(
@@ -427,7 +427,7 @@ void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8(
@@ -436,7 +436,7 @@ void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2(
@@ -445,7 +445,7 @@ void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1(
@@ -454,7 +454,7 @@ void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2(
@@ -463,7 +463,7 @@ void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4(
@@ -472,7 +472,7 @@ void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8(
@@ -481,7 +481,7 @@ void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1(
@@ -490,7 +490,7 @@ void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2(
@@ -499,7 +499,7 @@ void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4(
@@ -508,7 +508,7 @@ void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8(
@@ -517,7 +517,7 @@ void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
- return vsoxei16(base, bindex, value, vl);
+ return __riscv_vsoxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4_m(
@@ -526,7 +526,7 @@ void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2_m(
@@ -535,7 +535,7 @@ void test_vsoxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1_m(
@@ -544,7 +544,7 @@ void test_vsoxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2_m(
@@ -553,7 +553,7 @@ void test_vsoxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4_m(
@@ -562,7 +562,7 @@ void test_vsoxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8_m(
@@ -571,7 +571,7 @@ void test_vsoxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m(
@@ -580,7 +580,7 @@ void test_vsoxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m(
@@ -589,7 +589,7 @@ void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m(
@@ -598,7 +598,7 @@ void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m(
@@ -607,7 +607,7 @@ void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m(
@@ -616,7 +616,7 @@ void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1_m(
@@ -625,7 +625,7 @@ void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2_m(
@@ -634,7 +634,7 @@ void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4_m(
@@ -643,7 +643,7 @@ void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8_m(
@@ -652,7 +652,7 @@ void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8_m(
@@ -661,7 +661,7 @@ void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4_m(
@@ -670,7 +670,7 @@ void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2_m(
@@ -679,7 +679,7 @@ void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2_m(
@@ -697,7 +697,7 @@ void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4_m(
@@ -706,7 +706,7 @@ void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4_m(
@@ -715,7 +715,7 @@ void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2_m(
@@ -724,7 +724,7 @@ void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1_m(
@@ -733,7 +733,7 @@ void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2_m(
@@ -742,7 +742,7 @@ void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4_m(
@@ -751,7 +751,7 @@ void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8_m(
@@ -760,7 +760,7 @@ void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2_m(
@@ -769,7 +769,7 @@ void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1_m(
@@ -778,7 +778,7 @@ void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2_m(
@@ -787,7 +787,7 @@ void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4_m(
@@ -796,7 +796,7 @@ void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8_m(
@@ -805,7 +805,7 @@ void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1_m(
@@ -814,7 +814,7 @@ void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2_m(
@@ -823,7 +823,7 @@ void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4_m(
@@ -832,7 +832,7 @@ void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8_m(
@@ -841,7 +841,7 @@ void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8_m(
@@ -850,7 +850,7 @@ void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4_m(
@@ -859,7 +859,7 @@ void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2_m(
@@ -868,7 +868,7 @@ void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1_m(
@@ -877,7 +877,7 @@ void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2_m(
@@ -886,7 +886,7 @@ void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4_m(
@@ -895,7 +895,7 @@ void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4_m(
@@ -904,7 +904,7 @@ void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2_m(
@@ -913,7 +913,7 @@ void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1_m(
@@ -922,7 +922,7 @@ void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2_m(
@@ -931,7 +931,7 @@ void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4_m(
@@ -940,7 +940,7 @@ void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8_m(
@@ -949,7 +949,7 @@ void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2_m(
@@ -958,7 +958,7 @@ void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1_m(
@@ -967,7 +967,7 @@ void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2_m(
@@ -976,7 +976,7 @@ void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4_m(
@@ -985,7 +985,7 @@ void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8_m(
@@ -994,7 +994,7 @@ void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1_m(
@@ -1003,7 +1003,7 @@ void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2_m(
@@ -1012,7 +1012,7 @@ void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4_m(
@@ -1021,7 +1021,7 @@ void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8_m(
@@ -1030,6 +1030,6 @@ void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
- return vsoxei16(mask, base, bindex, value, vl);
+ return __riscv_vsoxei16(mask, base, bindex, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei32.c
index 8cc8df54f6f6..203ba83ed8e3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsoxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2(
@@ -58,7 +58,7 @@ void test_vsoxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1(
@@ -67,7 +67,7 @@ void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2(
@@ -76,7 +76,7 @@ void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4(
@@ -85,7 +85,7 @@ void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8(
@@ -94,7 +94,7 @@ void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1(
@@ -103,7 +103,7 @@ void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2(
@@ -112,7 +112,7 @@ void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4(
@@ -121,7 +121,7 @@ void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8(
@@ -130,7 +130,7 @@ void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8(
@@ -139,7 +139,7 @@ void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4(
@@ -148,7 +148,7 @@ void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2(
@@ -157,7 +157,7 @@ void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1(
@@ -166,7 +166,7 @@ void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2(
@@ -175,7 +175,7 @@ void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4(
@@ -184,7 +184,7 @@ void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2(
@@ -193,7 +193,7 @@ void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1(
@@ -202,7 +202,7 @@ void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2(
@@ -211,7 +211,7 @@ void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4(
@@ -220,7 +220,7 @@ void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2(
@@ -229,7 +229,7 @@ void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1(
@@ -238,7 +238,7 @@ void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2(
@@ -247,7 +247,7 @@ void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4(
@@ -256,7 +256,7 @@ void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8(
@@ -265,7 +265,7 @@ void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1(
@@ -274,7 +274,7 @@ void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2(
@@ -283,7 +283,7 @@ void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4(
@@ -292,7 +292,7 @@ void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8(
@@ -301,7 +301,7 @@ void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8(
@@ -310,7 +310,7 @@ void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4(
@@ -319,7 +319,7 @@ void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2(
@@ -328,7 +328,7 @@ void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1(
@@ -337,7 +337,7 @@ void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2(
@@ -346,7 +346,7 @@ void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4(
@@ -355,7 +355,7 @@ void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2(
@@ -364,7 +364,7 @@ void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1(
@@ -373,7 +373,7 @@ void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2(
@@ -382,7 +382,7 @@ void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4(
@@ -391,7 +391,7 @@ void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2(
@@ -400,7 +400,7 @@ void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1(
@@ -409,7 +409,7 @@ void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2(
@@ -418,7 +418,7 @@ void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4(
@@ -427,7 +427,7 @@ void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8(
@@ -436,7 +436,7 @@ void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1(
@@ -445,7 +445,7 @@ void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2(
@@ -454,7 +454,7 @@ void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4(
@@ -463,7 +463,7 @@ void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8(
@@ -472,7 +472,7 @@ void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
- return vsoxei32(base, bindex, value, vl);
+ return __riscv_vsoxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4_m(
@@ -481,7 +481,7 @@ void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2_m(
@@ -490,7 +490,7 @@ void test_vsoxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1_m(
@@ -499,7 +499,7 @@ void test_vsoxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2_m(
@@ -508,7 +508,7 @@ void test_vsoxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4_m(
@@ -517,7 +517,7 @@ void test_vsoxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2_m(
@@ -526,7 +526,7 @@ void test_vsoxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1_m(
@@ -535,7 +535,7 @@ void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2_m(
@@ -544,7 +544,7 @@ void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4_m(
@@ -553,7 +553,7 @@ void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8_m(
@@ -562,7 +562,7 @@ void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1_m(
@@ -571,7 +571,7 @@ void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2_m(
@@ -580,7 +580,7 @@ void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4_m(
@@ -589,7 +589,7 @@ void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8_m(
@@ -598,7 +598,7 @@ void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8_m(
@@ -607,7 +607,7 @@ void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4_m(
@@ -616,7 +616,7 @@ void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2_m(
@@ -625,7 +625,7 @@ void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1_m(
@@ -634,7 +634,7 @@ void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2_m(
@@ -643,7 +643,7 @@ void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4_m(
@@ -652,7 +652,7 @@ void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2_m(
@@ -661,7 +661,7 @@ void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1_m(
@@ -670,7 +670,7 @@ void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2_m(
@@ -679,7 +679,7 @@ void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4_m(
@@ -688,7 +688,7 @@ void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2_m(
@@ -697,7 +697,7 @@ void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1_m(
@@ -706,7 +706,7 @@ void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2_m(
@@ -715,7 +715,7 @@ void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4_m(
@@ -724,7 +724,7 @@ void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8_m(
@@ -733,7 +733,7 @@ void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1_m(
@@ -742,7 +742,7 @@ void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2_m(
@@ -751,7 +751,7 @@ void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4_m(
@@ -760,7 +760,7 @@ void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8_m(
@@ -769,7 +769,7 @@ void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8_m(
@@ -778,7 +778,7 @@ void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4_m(
@@ -787,7 +787,7 @@ void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2_m(
@@ -796,7 +796,7 @@ void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1_m(
@@ -805,7 +805,7 @@ void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2_m(
@@ -814,7 +814,7 @@ void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4_m(
@@ -823,7 +823,7 @@ void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2_m(
@@ -832,7 +832,7 @@ void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1_m(
@@ -841,7 +841,7 @@ void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2_m(
@@ -850,7 +850,7 @@ void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4_m(
@@ -859,7 +859,7 @@ void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2_m(
@@ -868,7 +868,7 @@ void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1_m(
@@ -877,7 +877,7 @@ void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2_m(
@@ -886,7 +886,7 @@ void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4_m(
@@ -895,7 +895,7 @@ void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8_m(
@@ -904,7 +904,7 @@ void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1_m(
@@ -913,7 +913,7 @@ void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2_m(
@@ -922,7 +922,7 @@ void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4_m(
@@ -931,7 +931,7 @@ void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8_m(
@@ -940,6 +940,6 @@ void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
- return vsoxei32(mask, base, bindex, value, vl);
+ return __riscv_vsoxei32(mask, base, bindex, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei64.c
index 545584d420e7..68e68b595427 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4(
@@ -76,7 +76,7 @@ void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1(
@@ -85,7 +85,7 @@ void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2(
@@ -94,7 +94,7 @@ void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4(
@@ -103,7 +103,7 @@ void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8(
@@ -112,7 +112,7 @@ void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8(
@@ -121,7 +121,7 @@ void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4(
@@ -130,7 +130,7 @@ void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2(
@@ -139,7 +139,7 @@ void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1(
@@ -148,7 +148,7 @@ void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4(
@@ -157,7 +157,7 @@ void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2(
@@ -166,7 +166,7 @@ void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1(
@@ -175,7 +175,7 @@ void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2(
@@ -184,7 +184,7 @@ void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2(
@@ -193,7 +193,7 @@ void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1(
@@ -202,7 +202,7 @@ void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2(
@@ -211,7 +211,7 @@ void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4(
@@ -220,7 +220,7 @@ void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1(
@@ -229,7 +229,7 @@ void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2(
@@ -238,7 +238,7 @@ void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4(
@@ -247,7 +247,7 @@ void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8(
@@ -256,7 +256,7 @@ void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8(
@@ -265,7 +265,7 @@ void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4(
@@ -274,7 +274,7 @@ void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2(
@@ -283,7 +283,7 @@ void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1(
@@ -292,7 +292,7 @@ void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4(
@@ -301,7 +301,7 @@ void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2(
@@ -310,7 +310,7 @@ void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1(
@@ -319,7 +319,7 @@ void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2(
@@ -328,7 +328,7 @@ void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2(
@@ -337,7 +337,7 @@ void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1(
@@ -346,7 +346,7 @@ void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2(
@@ -355,7 +355,7 @@ void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4(
@@ -364,7 +364,7 @@ void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1(
@@ -373,7 +373,7 @@ void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2(
@@ -382,7 +382,7 @@ void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4(
@@ -391,7 +391,7 @@ void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8(
@@ -400,7 +400,7 @@ void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
- return vsoxei64(base, bindex, value, vl);
+ return __riscv_vsoxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4_m(
@@ -409,7 +409,7 @@ void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2_m(
@@ -418,7 +418,7 @@ void test_vsoxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1_m(
@@ -427,7 +427,7 @@ void test_vsoxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2_m(
@@ -436,7 +436,7 @@ void test_vsoxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2_m(
@@ -445,7 +445,7 @@ void test_vsoxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1_m(
@@ -454,7 +454,7 @@ void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2_m(
@@ -463,7 +463,7 @@ void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4_m(
@@ -472,7 +472,7 @@ void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1_m(
@@ -481,7 +481,7 @@ void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2_m(
@@ -490,7 +490,7 @@ void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4_m(
@@ -499,7 +499,7 @@ void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8_m(
@@ -508,7 +508,7 @@ void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8_m(
@@ -517,7 +517,7 @@ void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4_m(
@@ -526,7 +526,7 @@ void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2_m(
@@ -535,7 +535,7 @@ void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1_m(
@@ -544,7 +544,7 @@ void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4_m(
@@ -553,7 +553,7 @@ void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2_m(
@@ -562,7 +562,7 @@ void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1_m(
@@ -571,7 +571,7 @@ void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2_m(
@@ -580,7 +580,7 @@ void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2_m(
@@ -589,7 +589,7 @@ void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1_m(
@@ -598,7 +598,7 @@ void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2_m(
@@ -607,7 +607,7 @@ void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4_m(
@@ -616,7 +616,7 @@ void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1_m(
@@ -625,7 +625,7 @@ void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2_m(
@@ -634,7 +634,7 @@ void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4_m(
@@ -643,7 +643,7 @@ void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8_m(
@@ -652,7 +652,7 @@ void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8_m(
@@ -661,7 +661,7 @@ void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4_m(
@@ -670,7 +670,7 @@ void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2_m(
@@ -679,7 +679,7 @@ void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1_m(
@@ -688,7 +688,7 @@ void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4_m(
@@ -697,7 +697,7 @@ void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2_m(
@@ -706,7 +706,7 @@ void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1_m(
@@ -715,7 +715,7 @@ void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2_m(
@@ -724,7 +724,7 @@ void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2_m(
@@ -733,7 +733,7 @@ void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1_m(
@@ -742,7 +742,7 @@ void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2_m(
@@ -751,7 +751,7 @@ void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4_m(
@@ -760,7 +760,7 @@ void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1_m(
@@ -769,7 +769,7 @@ void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2_m(
@@ -778,7 +778,7 @@ void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4_m(
@@ -787,7 +787,7 @@ void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8_m(
@@ -796,6 +796,6 @@ void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
- return vsoxei64(mask, base, bindex, value, vl);
+ return __riscv_vsoxei64(mask, base, bindex, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei8.c
index 1d3139f94b8c..a770e0e9f682 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsoxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8(
@@ -58,7 +58,7 @@ void test_vsoxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2(
@@ -67,7 +67,7 @@ void test_vsoxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1(
@@ -76,7 +76,7 @@ void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2(
@@ -85,7 +85,7 @@ void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4(
@@ -94,7 +94,7 @@ void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8(
@@ -103,7 +103,7 @@ void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1(
@@ -112,7 +112,7 @@ void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2(
@@ -121,7 +121,7 @@ void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4(
@@ -130,7 +130,7 @@ void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8(
@@ -139,7 +139,7 @@ void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8(
@@ -148,7 +148,7 @@ void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4(
@@ -157,7 +157,7 @@ void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2(
@@ -166,7 +166,7 @@ void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1(
@@ -175,7 +175,7 @@ void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2(
@@ -184,7 +184,7 @@ void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4(
@@ -193,7 +193,7 @@ void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8(
@@ -202,7 +202,7 @@ void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4(
@@ -211,7 +211,7 @@ void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2(
@@ -220,7 +220,7 @@ void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1(
@@ -229,7 +229,7 @@ void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2(
@@ -238,7 +238,7 @@ void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4(
@@ -247,7 +247,7 @@ void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8(
@@ -256,7 +256,7 @@ void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2(
@@ -265,7 +265,7 @@ void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1(
@@ -274,7 +274,7 @@ void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2(
@@ -283,7 +283,7 @@ void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4(
@@ -292,7 +292,7 @@ void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8(
@@ -301,7 +301,7 @@ void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1(
@@ -310,7 +310,7 @@ void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2(
@@ -319,7 +319,7 @@ void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4(
@@ -328,7 +328,7 @@ void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8(
@@ -337,7 +337,7 @@ void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8(
@@ -346,7 +346,7 @@ void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4(
@@ -355,7 +355,7 @@ void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2(
@@ -364,7 +364,7 @@ void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1(
@@ -373,7 +373,7 @@ void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2(
@@ -382,7 +382,7 @@ void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4(
@@ -391,7 +391,7 @@ void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8(
@@ -400,7 +400,7 @@ void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4(
@@ -409,7 +409,7 @@ void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2(
@@ -418,7 +418,7 @@ void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1(
@@ -427,7 +427,7 @@ void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2(
@@ -436,7 +436,7 @@ void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4(
@@ -445,7 +445,7 @@ void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8(
@@ -454,7 +454,7 @@ void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2(
@@ -463,7 +463,7 @@ void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1(
@@ -472,7 +472,7 @@ void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2(
@@ -481,7 +481,7 @@ void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4(
@@ -490,7 +490,7 @@ void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8(
@@ -499,7 +499,7 @@ void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1(
@@ -508,7 +508,7 @@ void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2(
@@ -517,7 +517,7 @@ void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4(
@@ -526,7 +526,7 @@ void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8(
@@ -535,7 +535,7 @@ void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
- return vsoxei8(base, bindex, value, vl);
+ return __riscv_vsoxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4_m(
@@ -544,7 +544,7 @@ void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2_m(
@@ -553,7 +553,7 @@ void test_vsoxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1_m(
@@ -562,7 +562,7 @@ void test_vsoxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2_m(
@@ -571,7 +571,7 @@ void test_vsoxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4_m(
@@ -580,7 +580,7 @@ void test_vsoxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8_m(
@@ -589,7 +589,7 @@ void test_vsoxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m(
@@ -598,7 +598,7 @@ void test_vsoxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m(
@@ -607,7 +607,7 @@ void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m(
@@ -616,7 +616,7 @@ void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m(
@@ -625,7 +625,7 @@ void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m(
@@ -634,7 +634,7 @@ void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1_m(
@@ -643,7 +643,7 @@ void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2_m(
@@ -652,7 +652,7 @@ void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4_m(
@@ -661,7 +661,7 @@ void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8_m(
@@ -670,7 +670,7 @@ void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8_m(
@@ -679,7 +679,7 @@ void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4_m(
@@ -688,7 +688,7 @@ void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2_m(
@@ -697,7 +697,7 @@ void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1_m(
@@ -706,7 +706,7 @@ void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2_m(
@@ -715,7 +715,7 @@ void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4_m(
@@ -724,7 +724,7 @@ void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8_m(
@@ -733,7 +733,7 @@ void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4_m(
@@ -742,7 +742,7 @@ void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2_m(
@@ -751,7 +751,7 @@ void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1_m(
@@ -760,7 +760,7 @@ void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2_m(
@@ -769,7 +769,7 @@ void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4_m(
@@ -778,7 +778,7 @@ void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8_m(
@@ -787,7 +787,7 @@ void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2_m(
@@ -796,7 +796,7 @@ void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1_m(
@@ -805,7 +805,7 @@ void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2_m(
@@ -814,7 +814,7 @@ void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4_m(
@@ -823,7 +823,7 @@ void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8_m(
@@ -832,7 +832,7 @@ void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1_m(
@@ -841,7 +841,7 @@ void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2_m(
@@ -850,7 +850,7 @@ void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4_m(
@@ -859,7 +859,7 @@ void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8_m(
@@ -868,7 +868,7 @@ void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8_m(
@@ -877,7 +877,7 @@ void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4_m(
@@ -886,7 +886,7 @@ void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2_m(
@@ -895,7 +895,7 @@ void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1_m(
@@ -904,7 +904,7 @@ void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2_m(
@@ -913,7 +913,7 @@ void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4_m(
@@ -922,7 +922,7 @@ void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8_m(
@@ -931,7 +931,7 @@ void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4_m(
@@ -940,7 +940,7 @@ void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2_m(
@@ -949,7 +949,7 @@ void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1_m(
@@ -958,7 +958,7 @@ void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2_m(
@@ -967,7 +967,7 @@ void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4_m(
@@ -976,7 +976,7 @@ void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8_m(
@@ -985,7 +985,7 @@ void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2_m(
@@ -994,7 +994,7 @@ void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1_m(
@@ -1003,7 +1003,7 @@ void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2_m(
@@ -1012,7 +1012,7 @@ void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4_m(
@@ -1021,7 +1021,7 @@ void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8_m(
@@ -1030,7 +1030,7 @@ void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1_m(
@@ -1039,7 +1039,7 @@ void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2_m(
@@ -1048,7 +1048,7 @@ void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4_m(
@@ -1057,7 +1057,7 @@ void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8_m(
@@ -1066,6 +1066,6 @@ void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
- return vsoxei8(mask, base, bindex, value, vl);
+ return __riscv_vsoxei8(mask, base, bindex, value, vl);
}
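The hunks above are purely mechanical: every overloaded call site keeps its argument list and only gains the __riscv_ prefix, with overload resolution still driven by the operand types. A minimal sketch of what a caller sees after this patch; the wrapper name scatter_u32 is hypothetical, while the intrinsic and operand types are taken from test_vsoxei8_v_u32m1 above:

    #include <riscv_vector.h>

    // Indexed (ordered) scatter of a vuint32m1_t through 8-bit byte
    // offsets. Before this patch the call was spelled vsoxei8(...); the
    // prefixed overload takes exactly the same operands.
    // (Hypothetical wrapper for illustration, not part of the test suite.)
    void scatter_u32(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
                     size_t vl) {
      __riscv_vsoxei8(base, bindex, value, vl);
    }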
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c
index 060bbc857976..69158ce9d7dd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsoxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2(
@@ -58,7 +58,7 @@ void test_vsoxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1(
@@ -67,7 +67,7 @@ void test_vsoxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2(
@@ -76,7 +76,7 @@ void test_vsoxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4(
@@ -85,7 +85,7 @@ void test_vsoxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1(
@@ -94,7 +94,7 @@ void test_vsoxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2(
@@ -103,7 +103,7 @@ void test_vsoxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4(
@@ -112,7 +112,7 @@ void test_vsoxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8(
@@ -121,7 +121,7 @@ void test_vsoxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4(
@@ -130,7 +130,7 @@ void test_vsoxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2(
@@ -139,7 +139,7 @@ void test_vsoxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1(
@@ -148,7 +148,7 @@ void test_vsoxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2(
@@ -157,7 +157,7 @@ void test_vsoxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4(
@@ -166,7 +166,7 @@ void test_vsoxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4(
@@ -175,7 +175,7 @@ void test_vsoxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2(
@@ -184,7 +184,7 @@ void test_vsoxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1(
@@ -193,7 +193,7 @@ void test_vsoxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2(
@@ -202,7 +202,7 @@ void test_vsoxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4(
@@ -211,7 +211,7 @@ void test_vsoxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2(
@@ -238,7 +238,7 @@ void test_vsoxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4(
@@ -247,7 +247,7 @@ void test_vsoxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1(
@@ -256,7 +256,7 @@ void test_vsoxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2(
@@ -265,7 +265,7 @@ void test_vsoxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4(
@@ -274,7 +274,7 @@ void test_vsoxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8(
@@ -283,7 +283,7 @@ void test_vsoxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4(
@@ -292,7 +292,7 @@ void test_vsoxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2(
@@ -301,7 +301,7 @@ void test_vsoxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1(
@@ -310,7 +310,7 @@ void test_vsoxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2(
@@ -319,7 +319,7 @@ void test_vsoxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4(
@@ -328,7 +328,7 @@ void test_vsoxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4(
@@ -337,7 +337,7 @@ void test_vsoxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2(
@@ -346,7 +346,7 @@ void test_vsoxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1(
@@ -355,7 +355,7 @@ void test_vsoxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2(
@@ -364,7 +364,7 @@ void test_vsoxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4(
@@ -373,7 +373,7 @@ void test_vsoxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2(
@@ -382,7 +382,7 @@ void test_vsoxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1(
@@ -391,7 +391,7 @@ void test_vsoxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2(
@@ -400,7 +400,7 @@ void test_vsoxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4(
@@ -409,7 +409,7 @@ void test_vsoxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1(
@@ -418,7 +418,7 @@ void test_vsoxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2(
@@ -427,7 +427,7 @@ void test_vsoxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4(
@@ -436,7 +436,7 @@ void test_vsoxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
}
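The segment variant follows the same pattern: vsoxseg2ei16 scatters a 2-field segment per element, so the overloaded call passes both field vectors before vl. A minimal sketch under the same assumptions; the wrapper name scatter_pairs is hypothetical, while the intrinsic and operand types are copied from test_vsoxseg2ei16_v_u32m1 above:

    #include <riscv_vector.h>

    // Two-field indexed segment store: v0 and v1 are written interleaved
    // at each indexed location, using 16-bit byte offsets.
    // (Hypothetical wrapper for illustration, not part of the test suite.)
    void scatter_pairs(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0,
                       vuint32m1_t v1, size_t vl) {
      __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
    }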
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4_m(
@@ -445,7 +445,7 @@ void test_vsoxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2_m(
@@ -472,7 +472,7 @@ void test_vsoxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4_m(
@@ -481,7 +481,7 @@ void test_vsoxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2_m(
@@ -490,7 +490,7 @@ void test_vsoxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1_m(
@@ -499,7 +499,7 @@ void test_vsoxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2_m(
@@ -508,7 +508,7 @@ void test_vsoxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4_m(
@@ -517,7 +517,7 @@ void test_vsoxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1_m(
@@ -526,7 +526,7 @@ void test_vsoxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2_m(
@@ -535,7 +535,7 @@ void test_vsoxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4_m(
@@ -544,7 +544,7 @@ void test_vsoxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m(
@@ -553,7 +553,7 @@ void test_vsoxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m(
@@ -598,7 +598,7 @@ void test_vsoxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m(
@@ -607,7 +607,7 @@ void test_vsoxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2_m(
@@ -616,7 +616,7 @@ void test_vsoxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m(
@@ -625,7 +625,7 @@ void test_vsoxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m(
@@ -634,7 +634,7 @@ void test_vsoxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m(
@@ -643,7 +643,7 @@ void test_vsoxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m(
@@ -652,7 +652,7 @@ void test_vsoxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m(
@@ -661,7 +661,7 @@ void test_vsoxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m(
@@ -670,7 +670,7 @@ void test_vsoxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m(
@@ -679,7 +679,7 @@ void test_vsoxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m(
@@ -688,7 +688,7 @@ void test_vsoxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m(
@@ -697,7 +697,7 @@ void test_vsoxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4_m(
@@ -706,7 +706,7 @@ void test_vsoxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8_m(
@@ -715,7 +715,7 @@ void test_vsoxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4_m(
@@ -724,7 +724,7 @@ void test_vsoxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m(
@@ -733,7 +733,7 @@ void test_vsoxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m(
@@ -742,7 +742,7 @@ void test_vsoxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m(
@@ -751,7 +751,7 @@ void test_vsoxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m(
@@ -760,7 +760,7 @@ void test_vsoxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m(
@@ -769,7 +769,7 @@ void test_vsoxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m(
@@ -778,7 +778,7 @@ void test_vsoxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m(
@@ -787,7 +787,7 @@ void test_vsoxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m(
@@ -796,7 +796,7 @@ void test_vsoxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m(
@@ -805,7 +805,7 @@ void test_vsoxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m(
@@ -814,7 +814,7 @@ void test_vsoxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m(
@@ -823,7 +823,7 @@ void test_vsoxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m(
@@ -832,7 +832,7 @@ void test_vsoxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m(
@@ -841,7 +841,7 @@ void test_vsoxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m(
@@ -850,7 +850,7 @@ void test_vsoxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m(
@@ -859,7 +859,7 @@ void test_vsoxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m(
@@ -868,6 +868,6 @@ void test_vsoxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
}
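
Editor's note: for the masked overloads above, only the spelling changes; argument order and types are untouched. A minimal caller sketch, assuming a toolchain with the standard <riscv_vector.h> intrinsics header and the V extension enabled (the function name store_seg2_masked is illustrative; the parameter types mirror test_vsoxseg2ei16_v_u64m4_m above):

    #include <stddef.h>
    #include <stdint.h>
    #include <riscv_vector.h>

    // Illustrative only: masked two-segment store of u64 data through
    // 16-bit ordered indexes, using the newly prefixed overloaded form.
    void store_seg2_masked(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
                           vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
      // Previously spelled vsoxseg2ei16(...); overload resolution on the
      // operand types is unchanged by the rename.
      __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
    }
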
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c
index 021d7f64a8d0..dd0d5a04794d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsoxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2(
@@ -58,7 +58,7 @@ void test_vsoxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1(
@@ -67,7 +67,7 @@ void test_vsoxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2(
@@ -76,7 +76,7 @@ void test_vsoxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4(
@@ -85,7 +85,7 @@ void test_vsoxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1(
@@ -94,7 +94,7 @@ void test_vsoxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2(
@@ -103,7 +103,7 @@ void test_vsoxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4(
@@ -112,7 +112,7 @@ void test_vsoxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8(
@@ -121,7 +121,7 @@ void test_vsoxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4(
@@ -130,7 +130,7 @@ void test_vsoxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2(
@@ -139,7 +139,7 @@ void test_vsoxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1(
@@ -148,7 +148,7 @@ void test_vsoxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2(
@@ -157,7 +157,7 @@ void test_vsoxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1(
@@ -184,7 +184,7 @@ void test_vsoxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2(
@@ -193,7 +193,7 @@ void test_vsoxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4(
@@ -202,7 +202,7 @@ void test_vsoxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2(
@@ -211,7 +211,7 @@ void test_vsoxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1(
@@ -220,7 +220,7 @@ void test_vsoxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2(
@@ -229,7 +229,7 @@ void test_vsoxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4(
@@ -238,7 +238,7 @@ void test_vsoxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1(
@@ -247,7 +247,7 @@ void test_vsoxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2(
@@ -256,7 +256,7 @@ void test_vsoxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4(
@@ -265,7 +265,7 @@ void test_vsoxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8(
@@ -274,7 +274,7 @@ void test_vsoxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4(
@@ -283,7 +283,7 @@ void test_vsoxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2(
@@ -292,7 +292,7 @@ void test_vsoxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1(
@@ -301,7 +301,7 @@ void test_vsoxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2(
@@ -310,7 +310,7 @@ void test_vsoxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4(
@@ -319,7 +319,7 @@ void test_vsoxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2(
@@ -328,7 +328,7 @@ void test_vsoxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1(
@@ -337,7 +337,7 @@ void test_vsoxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2(
@@ -346,7 +346,7 @@ void test_vsoxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4(
@@ -355,7 +355,7 @@ void test_vsoxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2(
@@ -364,7 +364,7 @@ void test_vsoxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1(
@@ -373,7 +373,7 @@ void test_vsoxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2(
@@ -382,7 +382,7 @@ void test_vsoxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4(
@@ -391,7 +391,7 @@ void test_vsoxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1(
@@ -400,7 +400,7 @@ void test_vsoxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2(
@@ -409,7 +409,7 @@ void test_vsoxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4(
@@ -418,7 +418,7 @@ void test_vsoxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4_m(
@@ -463,7 +463,7 @@ void test_vsoxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2_m(
@@ -472,7 +472,7 @@ void test_vsoxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1_m(
@@ -481,7 +481,7 @@ void test_vsoxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2_m(
@@ -490,7 +490,7 @@ void test_vsoxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4_m(
@@ -499,7 +499,7 @@ void test_vsoxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1_m(
@@ -508,7 +508,7 @@ void test_vsoxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2_m(
@@ -517,7 +517,7 @@ void test_vsoxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4_m(
@@ -526,7 +526,7 @@ void test_vsoxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m(
@@ -535,7 +535,7 @@ void test_vsoxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m(
@@ -544,7 +544,7 @@ void test_vsoxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m(
@@ -553,7 +553,7 @@ void test_vsoxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m(
@@ -562,7 +562,7 @@ void test_vsoxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m(
@@ -580,7 +580,7 @@ void test_vsoxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m(
@@ -598,7 +598,7 @@ void test_vsoxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m(
@@ -607,7 +607,7 @@ void test_vsoxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m(
@@ -616,7 +616,7 @@ void test_vsoxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m(
@@ -625,7 +625,7 @@ void test_vsoxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m(
@@ -634,7 +634,7 @@ void test_vsoxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m(
@@ -643,7 +643,7 @@ void test_vsoxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m(
@@ -652,7 +652,7 @@ void test_vsoxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m(
@@ -661,7 +661,7 @@ void test_vsoxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2_m(
@@ -670,7 +670,7 @@ void test_vsoxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m(
@@ -679,7 +679,7 @@ void test_vsoxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m(
@@ -688,7 +688,7 @@ void test_vsoxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m(
@@ -697,7 +697,7 @@ void test_vsoxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m(
@@ -706,7 +706,7 @@ void test_vsoxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m(
@@ -715,7 +715,7 @@ void test_vsoxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m(
@@ -724,7 +724,7 @@ void test_vsoxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m(
@@ -733,7 +733,7 @@ void test_vsoxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m(
@@ -742,7 +742,7 @@ void test_vsoxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m(
@@ -751,7 +751,7 @@ void test_vsoxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m(
@@ -760,7 +760,7 @@ void test_vsoxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m(
@@ -769,7 +769,7 @@ void test_vsoxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m(
@@ -778,7 +778,7 @@ void test_vsoxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1_m(
@@ -787,7 +787,7 @@ void test_vsoxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m(
@@ -796,7 +796,7 @@ void test_vsoxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m(
@@ -805,7 +805,7 @@ void test_vsoxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m(
@@ -814,7 +814,7 @@ void test_vsoxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m(
@@ -823,7 +823,7 @@ void test_vsoxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m(
@@ -832,6 +832,6 @@ void test_vsoxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
}
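
Editor's note: the unmasked overloads get the same mechanical substitution. A hedged sketch under the same assumptions as above (store_seg2 is an illustrative name; the parameter types mirror test_vsoxseg2ei32_v_f32m1):

    #include <stddef.h>
    #include <riscv_vector.h>

    // Illustrative only: unmasked two-segment store of f32 data,
    // indexed by 32-bit ordered offsets.
    void store_seg2(float *base, vuint32m1_t bindex,
                    vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
      // Previously spelled vsoxseg2ei32(...).
      __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl);
    }
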
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c
index 7d9e3dea5c25..76978fc96811 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4(
@@ -76,7 +76,7 @@ void test_vsoxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1(
@@ -85,7 +85,7 @@ void test_vsoxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2(
@@ -94,7 +94,7 @@ void test_vsoxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4(
@@ -103,7 +103,7 @@ void test_vsoxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8(
@@ -112,7 +112,7 @@ void test_vsoxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4(
@@ -121,7 +121,7 @@ void test_vsoxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1(
@@ -139,7 +139,7 @@ void test_vsoxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4(
@@ -148,7 +148,7 @@ void test_vsoxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2(
@@ -157,7 +157,7 @@ void test_vsoxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1(
@@ -166,7 +166,7 @@ void test_vsoxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2(
@@ -175,7 +175,7 @@ void test_vsoxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2(
@@ -184,7 +184,7 @@ void test_vsoxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1(
@@ -193,7 +193,7 @@ void test_vsoxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2(
@@ -202,7 +202,7 @@ void test_vsoxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4(
@@ -211,7 +211,7 @@ void test_vsoxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1(
@@ -220,7 +220,7 @@ void test_vsoxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2(
@@ -229,7 +229,7 @@ void test_vsoxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4(
@@ -238,7 +238,7 @@ void test_vsoxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8(
@@ -247,7 +247,7 @@ void test_vsoxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4(
@@ -256,7 +256,7 @@ void test_vsoxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2(
@@ -265,7 +265,7 @@ void test_vsoxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1(
@@ -274,7 +274,7 @@ void test_vsoxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4(
@@ -283,7 +283,7 @@ void test_vsoxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2(
@@ -292,7 +292,7 @@ void test_vsoxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1(
@@ -301,7 +301,7 @@ void test_vsoxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2(
@@ -310,7 +310,7 @@ void test_vsoxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2(
@@ -319,7 +319,7 @@ void test_vsoxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1(
@@ -328,7 +328,7 @@ void test_vsoxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2(
@@ -337,7 +337,7 @@ void test_vsoxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4(
@@ -346,7 +346,7 @@ void test_vsoxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1(
@@ -355,7 +355,7 @@ void test_vsoxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2(
@@ -364,7 +364,7 @@ void test_vsoxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4(
@@ -373,7 +373,7 @@ void test_vsoxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4_m(
@@ -382,7 +382,7 @@ void test_vsoxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2_m(
@@ -391,7 +391,7 @@ void test_vsoxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1_m(
@@ -400,7 +400,7 @@ void test_vsoxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2_m(
@@ -418,7 +418,7 @@ void test_vsoxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1_m(
@@ -427,7 +427,7 @@ void test_vsoxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4_m(
@@ -445,7 +445,7 @@ void test_vsoxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1_m(
@@ -454,7 +454,7 @@ void test_vsoxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2_m(
@@ -463,7 +463,7 @@ void test_vsoxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4_m(
@@ -472,7 +472,7 @@ void test_vsoxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m(
@@ -481,7 +481,7 @@ void test_vsoxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m(
@@ -490,7 +490,7 @@ void test_vsoxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m(
@@ -499,7 +499,7 @@ void test_vsoxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m(
@@ -508,7 +508,7 @@ void test_vsoxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m(
@@ -517,7 +517,7 @@ void test_vsoxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m(
@@ -526,7 +526,7 @@ void test_vsoxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m(
@@ -535,7 +535,7 @@ void test_vsoxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m(
@@ -544,7 +544,7 @@ void test_vsoxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m(
@@ -553,7 +553,7 @@ void test_vsoxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m(
@@ -562,7 +562,7 @@ void test_vsoxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m(
@@ -580,7 +580,7 @@ void test_vsoxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m(
@@ -589,7 +589,7 @@ void test_vsoxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m(
@@ -598,7 +598,7 @@ void test_vsoxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m(
@@ -607,7 +607,7 @@ void test_vsoxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m(
@@ -616,7 +616,7 @@ void test_vsoxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4_m(
@@ -625,7 +625,7 @@ void test_vsoxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m(
@@ -634,7 +634,7 @@ void test_vsoxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m(
@@ -643,7 +643,7 @@ void test_vsoxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m(
@@ -652,7 +652,7 @@ void test_vsoxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m(
@@ -661,7 +661,7 @@ void test_vsoxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m(
@@ -670,7 +670,7 @@ void test_vsoxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m(
@@ -679,7 +679,7 @@ void test_vsoxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m(
@@ -688,7 +688,7 @@ void test_vsoxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m(
@@ -697,7 +697,7 @@ void test_vsoxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m(
@@ -706,7 +706,7 @@ void test_vsoxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m(
@@ -715,7 +715,7 @@ void test_vsoxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m(
@@ -724,7 +724,7 @@ void test_vsoxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m(
@@ -733,7 +733,7 @@ void test_vsoxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m(
@@ -742,6 +742,6 @@ void test_vsoxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
}
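Every hunk in this file follows the same mechanical pattern: the overloaded call keeps its operands unchanged and only gains the `__riscv_` prefix, since the overload still resolves on the operand types. As a minimal sketch of the user-facing effect, mirroring the i32m1/ei64 case exercised above (the function name store_seg2 is illustrative, not from this patch; assumes riscv_vector.h and an RVV-enabled toolchain, e.g. -march=rv64gcv):

#include <riscv_vector.h>

// Indexed segment store of two fields, spelled with the new prefix.
// Before this patch the same call was written vsoxseg2ei64(...).
void store_seg2(int32_t *base, vuint64m2_t bindex,
                vint32m1_t v0, vint32m1_t v1, size_t vl) {
  __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
}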
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c
index 98db6af3eb8b..916799dbd33a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsoxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2(
@@ -58,7 +58,7 @@ void test_vsoxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1(
@@ -67,7 +67,7 @@ void test_vsoxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2(
@@ -76,7 +76,7 @@ void test_vsoxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4(
@@ -85,7 +85,7 @@ void test_vsoxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1(
@@ -94,7 +94,7 @@ void test_vsoxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2(
@@ -103,7 +103,7 @@ void test_vsoxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4(
@@ -112,7 +112,7 @@ void test_vsoxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8(
@@ -121,7 +121,7 @@ void test_vsoxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4(
@@ -130,7 +130,7 @@ void test_vsoxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2(
@@ -139,7 +139,7 @@ void test_vsoxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1(
@@ -148,7 +148,7 @@ void test_vsoxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2(
@@ -157,7 +157,7 @@ void test_vsoxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4(
@@ -166,7 +166,7 @@ void test_vsoxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4(
@@ -175,7 +175,7 @@ void test_vsoxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2(
@@ -184,7 +184,7 @@ void test_vsoxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1(
@@ -193,7 +193,7 @@ void test_vsoxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2(
@@ -202,7 +202,7 @@ void test_vsoxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4(
@@ -211,7 +211,7 @@ void test_vsoxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2(
@@ -238,7 +238,7 @@ void test_vsoxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4(
@@ -247,7 +247,7 @@ void test_vsoxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1(
@@ -256,7 +256,7 @@ void test_vsoxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2(
@@ -265,7 +265,7 @@ void test_vsoxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4(
@@ -274,7 +274,7 @@ void test_vsoxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8(
@@ -283,7 +283,7 @@ void test_vsoxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4(
@@ -292,7 +292,7 @@ void test_vsoxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2(
@@ -301,7 +301,7 @@ void test_vsoxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1(
@@ -310,7 +310,7 @@ void test_vsoxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2(
@@ -319,7 +319,7 @@ void test_vsoxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4(
@@ -328,7 +328,7 @@ void test_vsoxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4(
@@ -337,7 +337,7 @@ void test_vsoxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2(
@@ -346,7 +346,7 @@ void test_vsoxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1(
@@ -355,7 +355,7 @@ void test_vsoxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2(
@@ -364,7 +364,7 @@ void test_vsoxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4(
@@ -373,7 +373,7 @@ void test_vsoxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2(
@@ -382,7 +382,7 @@ void test_vsoxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1(
@@ -391,7 +391,7 @@ void test_vsoxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2(
@@ -400,7 +400,7 @@ void test_vsoxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4(
@@ -409,7 +409,7 @@ void test_vsoxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1(
@@ -418,7 +418,7 @@ void test_vsoxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2(
@@ -427,7 +427,7 @@ void test_vsoxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4(
@@ -436,7 +436,7 @@ void test_vsoxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4_m(
@@ -445,7 +445,7 @@ void test_vsoxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2_m(
@@ -472,7 +472,7 @@ void test_vsoxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4_m(
@@ -481,7 +481,7 @@ void test_vsoxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2_m(
@@ -490,7 +490,7 @@ void test_vsoxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1_m(
@@ -499,7 +499,7 @@ void test_vsoxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2_m(
@@ -508,7 +508,7 @@ void test_vsoxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4_m(
@@ -517,7 +517,7 @@ void test_vsoxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1_m(
@@ -526,7 +526,7 @@ void test_vsoxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2_m(
@@ -535,7 +535,7 @@ void test_vsoxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4_m(
@@ -544,7 +544,7 @@ void test_vsoxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m(
@@ -553,7 +553,7 @@ void test_vsoxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m(
@@ -598,7 +598,7 @@ void test_vsoxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m(
@@ -607,7 +607,7 @@ void test_vsoxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m(
@@ -616,7 +616,7 @@ void test_vsoxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m(
@@ -625,7 +625,7 @@ void test_vsoxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m(
@@ -634,7 +634,7 @@ void test_vsoxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m(
@@ -643,7 +643,7 @@ void test_vsoxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2_m(
@@ -652,7 +652,7 @@ void test_vsoxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m(
@@ -661,7 +661,7 @@ void test_vsoxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m(
@@ -670,7 +670,7 @@ void test_vsoxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4_m(
@@ -679,7 +679,7 @@ void test_vsoxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m(
@@ -688,7 +688,7 @@ void test_vsoxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m(
@@ -697,7 +697,7 @@ void test_vsoxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m(
@@ -706,7 +706,7 @@ void test_vsoxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m(
@@ -715,7 +715,7 @@ void test_vsoxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m(
@@ -724,7 +724,7 @@ void test_vsoxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m(
@@ -733,7 +733,7 @@ void test_vsoxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m(
@@ -742,7 +742,7 @@ void test_vsoxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m(
@@ -751,7 +751,7 @@ void test_vsoxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m(
@@ -760,7 +760,7 @@ void test_vsoxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m(
@@ -769,7 +769,7 @@ void test_vsoxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m(
@@ -778,7 +778,7 @@ void test_vsoxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m(
@@ -787,7 +787,7 @@ void test_vsoxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m(
@@ -796,7 +796,7 @@ void test_vsoxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m(
@@ -805,7 +805,7 @@ void test_vsoxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m(
@@ -814,7 +814,7 @@ void test_vsoxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1_m(
@@ -823,7 +823,7 @@ void test_vsoxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m(
@@ -832,7 +832,7 @@ void test_vsoxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m(
@@ -841,7 +841,7 @@ void test_vsoxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m(
@@ -850,7 +850,7 @@ void test_vsoxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m(
@@ -859,7 +859,7 @@ void test_vsoxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m(
@@ -868,6 +868,6 @@ void test_vsoxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
}
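Across every file in this patch the change is identical: each call to an overloaded RVV intrinsic gains the `__riscv_` prefix, and nothing else about the call changes. A minimal caller-side sketch of the new spelling, reusing the exact signatures from the tests above and below — the wrapper function names are illustrative, not part of the patch, and building this assumes a compiler with RVV intrinsic support (plus zvfh for the _Float16 vector types):

    #include <riscv_vector.h>
    #include <stddef.h>

    /* Unmasked overload: same argument list as before, new prefix. */
    void store_seg3_f16(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0,
                        vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
      /* was: vsoxseg3ei16(base, bindex, v0, v1, v2, vl); */
      __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
    }

    /* Masked overload: the mask stays the first argument, as in the
     * test_*_m functions above. */
    void store_seg2_i64_masked(vbool64_t mask, int64_t *base,
                               vuint8mf8_t bindex, vint64m1_t v0,
                               vint64m1_t v1, size_t vl) {
      /* was: vsoxseg2ei8(mask, base, bindex, v0, v1, vl); */
      __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
    }
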
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c
index d7c049262a44..1e8331b084ef 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsoxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsoxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsoxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsoxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsoxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsoxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsoxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsoxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsoxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsoxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsoxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsoxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsoxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsoxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsoxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsoxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsoxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsoxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsoxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsoxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsoxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsoxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsoxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsoxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsoxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsoxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsoxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsoxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsoxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsoxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsoxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsoxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsoxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsoxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsoxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsoxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsoxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsoxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsoxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsoxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsoxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsoxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsoxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsoxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsoxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsoxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsoxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsoxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsoxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsoxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsoxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsoxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsoxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsoxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsoxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsoxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsoxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsoxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsoxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsoxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
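The ei32 file below applies the same rename to the 32-bit-index overloads. Note from the signatures how the index operand scales with the data type: vint8m2_t data pairs with a vuint32m8_t index vector — four times the register group — because each 8-bit element carries a 32-bit byte offset. A sketch of the masked form under the same assumptions as above (illustrative wrapper name, signature taken from the tests below):

    #include <riscv_vector.h>
    #include <stddef.h>

    void store_seg3_i8m2_masked(vbool4_t mask, int8_t *base,
                                vuint32m8_t bindex, vint8m2_t v0,
                                vint8m2_t v1, vint8m2_t v2, size_t vl) {
      /* was: vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); */
      __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
    }
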
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c
index 612523f7977d..6be62c4a535d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsoxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsoxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsoxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsoxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsoxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsoxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsoxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsoxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsoxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsoxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsoxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsoxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsoxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsoxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsoxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsoxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsoxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsoxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsoxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsoxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsoxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsoxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsoxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsoxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsoxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsoxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsoxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsoxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsoxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsoxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsoxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsoxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsoxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsoxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsoxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsoxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsoxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsoxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsoxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsoxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsoxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsoxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsoxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsoxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsoxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsoxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsoxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsoxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsoxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsoxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsoxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsoxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsoxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsoxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsoxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsoxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsoxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsoxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsoxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsoxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
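For reference, a minimal caller of the renamed overloaded intrinsic, mirroring the unmasked test signatures above. This sketch is illustrative only and not part of the patch; the wrapper name scatter3_f32 is hypothetical, and it assumes a toolchain with the V extension enabled (e.g. -march=rv64gcv).

    #include <riscv_vector.h>

    // Illustrative sketch, not part of the patch. For each element i, the
    // indexed segment store writes the 3-field segment {v0[i], v1[i], v2[i]}
    // contiguously starting at byte offset bindex[i] from base.
    void scatter3_f32(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
                      vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
      // Pre-patch spelling: vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
      __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
    }

The overload resolution is unchanged by the rename: the argument types still select the concrete variant (here the f32m1 form), only the prefix on the overloaded name differs.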
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c
index 1b179596bcd7..72febc7ce3c3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsoxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsoxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsoxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsoxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4(
@@ -130,7 +130,7 @@ void test_vsoxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2(
@@ -139,7 +139,7 @@ void test_vsoxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1(
@@ -148,7 +148,7 @@ void test_vsoxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2(
@@ -157,7 +157,7 @@ void test_vsoxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2(
@@ -166,7 +166,7 @@ void test_vsoxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1(
@@ -175,7 +175,7 @@ void test_vsoxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2(
@@ -184,7 +184,7 @@ void test_vsoxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1(
@@ -193,7 +193,7 @@ void test_vsoxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2(
@@ -202,7 +202,7 @@ void test_vsoxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8(
@@ -211,7 +211,7 @@ void test_vsoxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4(
@@ -220,7 +220,7 @@ void test_vsoxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2(
@@ -229,7 +229,7 @@ void test_vsoxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1(
@@ -238,7 +238,7 @@ void test_vsoxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4(
@@ -247,7 +247,7 @@ void test_vsoxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2(
@@ -256,7 +256,7 @@ void test_vsoxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1(
@@ -265,7 +265,7 @@ void test_vsoxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2(
@@ -274,7 +274,7 @@ void test_vsoxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2(
@@ -283,7 +283,7 @@ void test_vsoxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1(
@@ -292,7 +292,7 @@ void test_vsoxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2(
@@ -301,7 +301,7 @@ void test_vsoxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1(
@@ -310,7 +310,7 @@ void test_vsoxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2(
@@ -319,7 +319,7 @@ void test_vsoxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4_m(
@@ -328,7 +328,7 @@ void test_vsoxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2_m(
@@ -337,7 +337,7 @@ void test_vsoxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1_m(
@@ -346,7 +346,7 @@ void test_vsoxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2_m(
@@ -355,7 +355,7 @@ void test_vsoxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2_m(
@@ -382,7 +382,7 @@ void test_vsoxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1_m(
@@ -391,7 +391,7 @@ void test_vsoxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2_m(
@@ -400,7 +400,7 @@ void test_vsoxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m(
@@ -409,7 +409,7 @@ void test_vsoxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m(
@@ -418,7 +418,7 @@ void test_vsoxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m(
@@ -427,7 +427,7 @@ void test_vsoxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m(
@@ -436,7 +436,7 @@ void test_vsoxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m(
@@ -445,7 +445,7 @@ void test_vsoxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m(
@@ -472,7 +472,7 @@ void test_vsoxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m(
@@ -481,7 +481,7 @@ void test_vsoxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m(
@@ -490,7 +490,7 @@ void test_vsoxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m(
@@ -499,7 +499,7 @@ void test_vsoxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m(
@@ -508,7 +508,7 @@ void test_vsoxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m(
@@ -517,7 +517,7 @@ void test_vsoxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m(
@@ -526,7 +526,7 @@ void test_vsoxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m(
@@ -535,7 +535,7 @@ void test_vsoxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m(
@@ -544,7 +544,7 @@ void test_vsoxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m(
@@ -553,7 +553,7 @@ void test_vsoxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m(
@@ -598,7 +598,7 @@ void test_vsoxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m(
@@ -607,7 +607,7 @@ void test_vsoxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m(
@@ -616,7 +616,7 @@ void test_vsoxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m(
@@ -625,7 +625,7 @@ void test_vsoxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m(
@@ -634,6 +634,6 @@ void test_vsoxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
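Likewise, a sketch of a masked call matching the _m overloads tested above; the wrapper name scatter3_f64_m is hypothetical and this is not part of the patch.

    #include <riscv_vector.h>

    // Illustrative sketch, not part of the patch. Passing a mask as the
    // first argument selects the masked overload of the same __riscv_ name:
    // only elements with mask[i] set store their segment {v0[i], v1[i],
    // v2[i]} at byte offset bindex[i] from base; masked-off elements
    // perform no store.
    void scatter3_f64_m(vbool64_t mask, double *base, vuint64m1_t bindex,
                        vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2,
                        size_t vl) {
      __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
    }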
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c
index e7b8089b0293..fed95bbc2428 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsoxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsoxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsoxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsoxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsoxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsoxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsoxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsoxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsoxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsoxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsoxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsoxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsoxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsoxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsoxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsoxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsoxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsoxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsoxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsoxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsoxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsoxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsoxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsoxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsoxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsoxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsoxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsoxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsoxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsoxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsoxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsoxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsoxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsoxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsoxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsoxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsoxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsoxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsoxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsoxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsoxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsoxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsoxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsoxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsoxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsoxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsoxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsoxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsoxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsoxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsoxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsoxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsoxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsoxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsoxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsoxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsoxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsoxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsoxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsoxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c
index 54d62563c221..53cfe6f257be 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsoxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsoxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsoxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsoxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsoxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsoxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsoxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsoxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsoxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsoxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsoxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsoxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsoxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsoxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsoxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsoxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsoxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsoxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsoxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsoxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsoxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsoxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsoxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsoxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsoxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsoxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsoxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsoxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsoxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsoxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsoxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsoxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsoxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsoxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsoxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsoxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsoxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsoxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsoxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsoxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsoxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsoxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsoxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsoxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsoxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsoxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsoxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsoxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsoxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsoxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsoxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsoxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsoxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsoxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsoxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsoxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsoxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsoxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsoxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsoxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c
index c3c6cf42b5ab..8d55eb234555 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsoxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsoxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsoxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsoxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsoxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsoxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsoxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsoxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsoxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsoxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsoxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsoxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsoxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsoxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsoxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsoxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsoxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsoxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsoxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsoxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsoxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsoxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsoxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsoxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsoxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsoxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsoxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsoxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsoxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsoxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsoxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsoxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsoxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsoxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsoxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsoxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsoxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsoxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsoxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsoxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsoxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsoxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsoxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsoxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsoxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsoxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsoxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsoxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsoxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsoxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsoxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsoxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsoxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsoxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsoxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsoxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsoxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsoxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsoxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsoxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
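The unmasked overloads follow the same pattern: the call site carries no _v_f32m1 type suffix, since the overload is chosen from the operand types (only the non-overloaded spelling keeps an explicit type suffix, per the earlier patches in this series). A short sketch mirroring test_vsoxseg4ei32_v_f32m1 above; the wrapper name is illustrative.

#include <riscv_vector.h>
#include <stddef.h>

// Unmasked 4-field segment store of f32 elements through 32-bit ordered
// indices; vl bounds the number of elements written per field.
void store_seg4_f32m1(float *base, vuint32m1_t bindex,
                      vfloat32m1_t v0, vfloat32m1_t v1,
                      vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
  __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}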
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c
index 68a6d5e6e651..c2010466b904 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsoxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsoxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsoxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsoxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4(
@@ -130,7 +130,7 @@ void test_vsoxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2(
@@ -139,7 +139,7 @@ void test_vsoxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1(
@@ -148,7 +148,7 @@ void test_vsoxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2(
@@ -157,7 +157,7 @@ void test_vsoxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2(
@@ -166,7 +166,7 @@ void test_vsoxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1(
@@ -175,7 +175,7 @@ void test_vsoxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2(
@@ -184,7 +184,7 @@ void test_vsoxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1(
@@ -193,7 +193,7 @@ void test_vsoxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2(
@@ -202,7 +202,7 @@ void test_vsoxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8(
@@ -211,7 +211,7 @@ void test_vsoxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4(
@@ -220,7 +220,7 @@ void test_vsoxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2(
@@ -229,7 +229,7 @@ void test_vsoxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1(
@@ -238,7 +238,7 @@ void test_vsoxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4(
@@ -247,7 +247,7 @@ void test_vsoxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2(
@@ -256,7 +256,7 @@ void test_vsoxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1(
@@ -265,7 +265,7 @@ void test_vsoxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2(
@@ -274,7 +274,7 @@ void test_vsoxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2(
@@ -283,7 +283,7 @@ void test_vsoxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1(
@@ -292,7 +292,7 @@ void test_vsoxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2(
@@ -301,7 +301,7 @@ void test_vsoxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1(
@@ -310,7 +310,7 @@ void test_vsoxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2(
@@ -319,7 +319,7 @@ void test_vsoxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4_m(
@@ -328,7 +328,7 @@ void test_vsoxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2_m(
@@ -337,7 +337,7 @@ void test_vsoxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1_m(
@@ -346,7 +346,7 @@ void test_vsoxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2_m(
@@ -355,7 +355,7 @@ void test_vsoxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2_m(
@@ -382,7 +382,7 @@ void test_vsoxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1_m(
@@ -391,7 +391,7 @@ void test_vsoxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2_m(
@@ -400,7 +400,7 @@ void test_vsoxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m(
@@ -409,7 +409,7 @@ void test_vsoxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m(
@@ -418,7 +418,7 @@ void test_vsoxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m(
@@ -427,7 +427,7 @@ void test_vsoxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m(
@@ -436,7 +436,7 @@ void test_vsoxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m(
@@ -445,7 +445,7 @@ void test_vsoxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m(
@@ -472,7 +472,7 @@ void test_vsoxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m(
@@ -481,7 +481,7 @@ void test_vsoxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m(
@@ -490,7 +490,7 @@ void test_vsoxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m(
@@ -499,7 +499,7 @@ void test_vsoxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m(
@@ -508,7 +508,7 @@ void test_vsoxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m(
@@ -517,7 +517,7 @@ void test_vsoxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m(
@@ -526,7 +526,7 @@ void test_vsoxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m(
@@ -535,7 +535,7 @@ void test_vsoxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m(
@@ -544,7 +544,7 @@ void test_vsoxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m(
@@ -553,7 +553,7 @@ void test_vsoxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m(
@@ -598,7 +598,7 @@ void test_vsoxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m(
@@ -607,7 +607,7 @@ void test_vsoxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m(
@@ -616,7 +616,7 @@ void test_vsoxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m(
@@ -625,7 +625,7 @@ void test_vsoxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m(
@@ -634,6 +634,6 @@ void test_vsoxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
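For illustration, a minimal sketch of how calling code adopts the renamed masked overload, assuming exactly the signatures these autogenerated tests exercise (the wrapper name masked_store_seg4_u64m2 is hypothetical, and building it requires a V-extension toolchain, e.g. -march=rv64gcv): the overload is resolved from the argument types, and passing a vbool*_t mask as the first argument selects the masked form.

#include <stdint.h>
#include <riscv_vector.h>

// Masked indexed segment store of four u64m2 registers; elements whose
// mask bit is 0 are not written. Previously spelled vsoxseg4ei64(...);
// this patch adds the __riscv_ prefix.
void masked_store_seg4_u64m2(vbool32_t mask, uint64_t *base,
                             vuint64m2_t bindex,
                             vuint64m2_t v0, vuint64m2_t v1,
                             vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
  __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}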
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c
index c69640a3ac4f..dff0b1587e8b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsoxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsoxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsoxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsoxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsoxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsoxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsoxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsoxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsoxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsoxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsoxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsoxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsoxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsoxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsoxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsoxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsoxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsoxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsoxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsoxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsoxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsoxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsoxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsoxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsoxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsoxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsoxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsoxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsoxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsoxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsoxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsoxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsoxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsoxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsoxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsoxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsoxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsoxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsoxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsoxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsoxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsoxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsoxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsoxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsoxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsoxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsoxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsoxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsoxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsoxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsoxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsoxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsoxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsoxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsoxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsoxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsoxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsoxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsoxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsoxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsoxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsoxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsoxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsoxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsoxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsoxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsoxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsoxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
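A corresponding sketch for the unmasked overload, again matching the signatures in the tests above (the wrapper name store_seg4_u8m1 is hypothetical): without a mask argument, the plain overload is selected, and the prefix rename applies identically.

#include <stdint.h>
#include <riscv_vector.h>

// Unmasked indexed segment store of four u8m1 registers, using 8-bit
// byte offsets from bindex. Previously spelled vsoxseg4ei8(...).
void store_seg4_u8m1(uint8_t *base, vuint8m1_t bindex,
                     vuint8m1_t v0, vuint8m1_t v1,
                     vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
  __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}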
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c
index ccb74afd8d25..2fa5b0ac75fb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c
index 3eed214d7900..09b7b0731205 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c
index 7cdf211fe9fd..efa78867d179 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c
index 33980d880335..e5d19d36a97a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
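A minimal call-site sketch of what this rename means for user code (hedged: the wrapper name store_seg5_f32m1 and the standalone translation unit are assumptions for illustration; the intrinsic call itself matches test_vsoxseg5ei8_v_f32m1 above, and <riscv_vector.h> is the header Clang provides for these intrinsics):

#include <riscv_vector.h>

// Indexed segment store of five fp32 fields through 8-bit indices.
// Only the spelling changes with this patch: the overloaded form still
// infers its variant from the argument types, but the name now carries
// the __riscv_ prefix required by the riscv-c-api-doc naming guideline.
void store_seg5_f32m1(float *base, vuint8mf4_t bindex,
                      vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2,
                      vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
  __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); // was vsoxseg5ei8(...)
}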
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c
index 514d9b95efd5..c931dec8a864 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c
index 56c2a80e7bd0..c87592f733b4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c
index a981674621b2..ee9944d02add 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
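For context, a minimal usage sketch (illustrative only, not part of the diff; the
wrapper name is hypothetical, while the intrinsic calls match the signatures
exercised by the tests above). The renamed intrinsic is still overloaded, so the
masked form is selected simply by passing the leading mask operand:

#include <riscv_vector.h>

void demo_vsoxseg6ei64(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
                       vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2,
                       vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
  // Unmasked: store six u8m1 segments through 64-bit byte offsets.
  __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
  // Masked: same overloaded name; the vbool8_t argument selects the _m variant.
  __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}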
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c
index e3246bf47175..ef6f382b1e81 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
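One detail worth noting from the ei8 tests above: the suffix fixes the index
element width at 8 bits, while the index register group scales with the data
type. A sketch under those assumptions (the wrapper name is hypothetical; the
call matches the f64m1 test above):

#include <riscv_vector.h>

void demo_vsoxseg6ei8_f64(double *base, vuint8mf8_t bindex,
                          vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2,
                          vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5,
                          size_t vl) {
  // 64-bit data at LMUL=1 pairs with 8-bit indices at LMUL=1/8 (vuint8mf8_t).
  __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}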
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c
index e8f012420ea0..6557a3c68d98 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
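The vsoxseg7 tests (ei16 above, ei32 below) differ from vsoxseg6 only in arity:
the segment count in the name determines how many value operands precede vl. A
minimal sketch (the wrapper name is hypothetical; the call matches the u16m1
test above):

#include <riscv_vector.h>

void demo_vsoxseg7ei16(uint16_t *base, vuint16m1_t bindex,
                       vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2,
                       vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5,
                       vuint16m1_t v6, size_t vl) {
  // Seven segment registers for vsoxseg7; vsoxseg6 above takes six.
  __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}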
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c
index 9e04acf184d3..2543685a9fac 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c
index 86f5f4651ac0..3fa23ad191d7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
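
The masked overloads renamed above keep the same prefixed name; passing a vboolN_t mask as the first argument is what selects the masked form. A likewise-hypothetical sketch, under the same toolchain assumptions as the earlier example:

#include <stdint.h>
#include <stddef.h>
#include <riscv_vector.h>

/* Masked variant: elements whose mask bit is clear are not stored.
   The argument order mirrors test_vsoxseg7ei64_v_u64m1_m above. */
void store_seg7_masked(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
                       vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2,
                       vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5,
                       vuint64m1_t v6, size_t vl) {
  __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
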
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c
index 0c9bb4c9d601..fd24a2010e48 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c
index 19a9ca9b59fd..6f77ff0c5578 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c
index b51b66e19923..e41d471edd27 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
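
One detail worth noting from the ei32 hunks above: the index vector's LMUL scales with the ratio of index width to data width, so vint8m1_t data pairs with a vuint32m4_t bindex (each 8-bit element needs a 32-bit index, four times the register footprint), while the mask ratio follows the data type (vint8m1_t pairs with vbool8_t). A sketch of the masked form, taken directly from the test signatures (the wrapper name is illustrative):

#include <riscv_vector.h>

// Sketch: masked eight-field segment store of i8 data through 32-bit
// indices; note vbool8_t tracks the data type while vuint32m4_t runs
// at four times the data LMUL.
void store_i8_segments_masked(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
                              vint8m1_t v0, vint8m1_t v1, vint8m1_t v2,
                              vint8m1_t v3, vint8m1_t v4, vint8m1_t v5,
                              vint8m1_t v6, vint8m1_t v7, size_t vl) {
  __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
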
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c
index cf3fbf4fe4c8..9fc75e99de6a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
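The hunks above complete the mechanical rename for the overloaded 64-bit-indexed, eight-field segment stores: every call site gains the `__riscv_` prefix while the argument lists, and therefore the overload resolution, stay untouched. A minimal caller updated the same way might look as follows (a sketch only; `store_u64_seg8_m` is a hypothetical helper, not part of the patch, and building it assumes a toolchain carrying this patch series with a `-march` string that enables the vector extension):

#include <riscv_vector.h>

// Hypothetical wrapper, not from the patch: forwards to the renamed
// overloaded intrinsic exactly as test_vsoxseg8ei64_v_u64m1_m above does.
// The leading vbool64_t argument is what selects the masked overload.
void store_u64_seg8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
                      vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2,
                      vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5,
                      vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
  __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}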
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c
index 4a0ffff21770..898e41fc8f1f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsoxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsoxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsoxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsoxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsoxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsoxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsoxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsoxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsoxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsoxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsoxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsoxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsoxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsoxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsoxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsoxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsoxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsoxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsoxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsoxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsoxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsoxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsoxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsoxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsoxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsoxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsoxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsoxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsoxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsoxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsoxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsoxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsoxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsoxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsoxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsoxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsoxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsoxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsoxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsoxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsoxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsoxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsoxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsoxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsoxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsoxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsoxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsoxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsoxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsoxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsoxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
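The next file applies the same rename to the overloaded arithmetic right shift, where both the vector-vector (`vv`) and vector-scalar (`vx`) forms resolve through the single `__riscv_vsra` name, as the test_vsra_vv_* and test_vsra_vx_* pairs below show. A sketch of ordinary user code migrated to the prefixed spelling (an illustration only; `sra2_i32` is hypothetical, and it assumes the companion spellings from the same patch series, `__riscv_vsetvl_e32m1`, `__riscv_vle32_v_i32m1`, and the overloaded `__riscv_vse32`):

#include <riscv_vector.h>

// Hypothetical example, not from the patch: arithmetic right shift of a
// buffer by 2, strip-mined over vl.
void sra2_i32(int32_t *dst, const int32_t *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e32m1(n);
    vint32m1_t v = __riscv_vle32_v_i32m1(src, vl);
    v = __riscv_vsra(v, (size_t)2, vl);  // size_t shift picks the vx overload
    __riscv_vse32(dst, v, vl);           // overloaded non-segment store
  }
}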
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsra.c
index c620d4b738d2..592ee0e121f9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsra.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
- return vsra(op1, shift, vl);
+ return __riscv_vsra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shif
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shif
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return vsra(mask, op1, shift, vl);
+ return __riscv_vsra(mask, op1, shift, vl);
}
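
For context between files: a minimal standalone sketch of the renamed overloaded API that the vsra tests above exercise. The function name demo_shift and its body are illustrative only (not part of the commit); the __riscv_vsra signature itself matches the masked vx test cases in this diff.

    // Illustrative sketch only; assumes a toolchain whose <riscv_vector.h>
    // provides the renamed overloaded intrinsics from this patch-set.
    #include <stddef.h>
    #include <riscv_vector.h>

    // Masked overloaded call: the element type is inferred from the operands,
    // and the call now carries the __riscv_ prefix per riscv-c-api-doc.
    vint32m1_t demo_shift(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
      return __riscv_vsra(mask, op1, shift, vl);  // was: vsra(mask, op1, shift, vl)
    }
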
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsrl.c
index 591fa9573d3d..ae1720e295fe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsrl.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
- return vsrl(op1, shift, vl);
+ return __riscv_vsrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsrl(mask, op1, shift, vl);
+ return __riscv_vsrl(mask, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse16.c
index d80a51c9b229..6ae0227c34d6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsse16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsse16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsse16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsse16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16m8(
@@ -58,7 +58,7 @@ void test_vsse16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16m8(_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16mf4(
@@ -67,7 +67,7 @@ void test_vsse16_v_f16m8(_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16mf2(
@@ -76,7 +76,7 @@ void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16m1(
@@ -85,7 +85,7 @@ void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16m2(
@@ -94,7 +94,7 @@ void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16m4(
@@ -103,7 +103,7 @@ void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16m8(
@@ -112,7 +112,7 @@ void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16mf4(
@@ -121,7 +121,7 @@ void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16mf2(
@@ -130,7 +130,7 @@ void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16m1(
@@ -139,7 +139,7 @@ void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16m2(
@@ -148,7 +148,7 @@ void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16m4(
@@ -157,7 +157,7 @@ void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16m8(
@@ -166,7 +166,7 @@ void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) {
- return vsse16(base, bstride, value, vl);
+ return __riscv_vsse16(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4_m(
@@ -175,7 +175,7 @@ void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2_m(
@@ -184,7 +184,7 @@ void test_vsse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16m1_m(
@@ -193,7 +193,7 @@ void test_vsse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16m2_m(
@@ -202,7 +202,7 @@ void test_vsse16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16m4_m(
@@ -211,7 +211,7 @@ void test_vsse16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_f16m8_m(
@@ -220,7 +220,7 @@ void test_vsse16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_f16m8_m(vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16mf4_m(
@@ -229,7 +229,7 @@ void test_vsse16_v_f16m8_m(vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16mf2_m(
@@ -238,7 +238,7 @@ void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16m1_m(
@@ -247,7 +247,7 @@ void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16m2_m(
@@ -256,7 +256,7 @@ void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16m4_m(
@@ -265,7 +265,7 @@ void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_i16m8_m(
@@ -274,7 +274,7 @@ void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16mf4_m(
@@ -283,7 +283,7 @@ void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16mf2_m(
@@ -292,7 +292,7 @@ void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16m1_m(
@@ -301,7 +301,7 @@ void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16m2_m(
@@ -310,7 +310,7 @@ void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16m4_m(
@@ -319,7 +319,7 @@ void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vui
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse16_v_u16m8_m(
@@ -328,6 +328,6 @@ void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vui
// CHECK-RV64-NEXT: ret void
//
void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) {
- return vsse16(mask, base, bstride, value, vl);
+ return __riscv_vsse16(mask, base, bstride, value, vl);
}
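
Similarly, a hedged sketch of the renamed strided-store intrinsic covered by the vsse16 tests above; demo_store and its parameters are illustrative, while the __riscv_vsse16 call mirrors the masked test signature in this file.

    // Illustrative sketch only; mirrors the masked vsse16 tests above.
    #include <stddef.h>
    #include <stdint.h>
    #include <riscv_vector.h>

    // Masked strided store: writes vl elements of `value` to `base` with byte
    // stride `bstride`, under `mask`; the overloaded name now carries __riscv_.
    void demo_store(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
                    vint16m1_t value, size_t vl) {
      __riscv_vsse16(mask, base, bstride, value, vl);  // was: vsse16(...)
    }
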
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse32.c
index 25a946a1b3e1..2a6f2a3956b8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_f32m2(
@@ -31,7 +31,7 @@ void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_f32m4(
@@ -40,7 +40,7 @@ void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_f32m8(
@@ -49,7 +49,7 @@ void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32mf2(
@@ -58,7 +58,7 @@ void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32m1(
@@ -67,7 +67,7 @@ void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32m2(
@@ -76,7 +76,7 @@ void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32m4(
@@ -85,7 +85,7 @@ void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32m8(
@@ -94,7 +94,7 @@ void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32mf2(
@@ -103,7 +103,7 @@ void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32m1(
@@ -112,7 +112,7 @@ void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32m2(
@@ -121,7 +121,7 @@ void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32m4(
@@ -130,7 +130,7 @@ void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32m8(
@@ -139,7 +139,7 @@ void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) {
- return vsse32(base, bstride, value, vl);
+ return __riscv_vsse32(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_f32mf2_m(
@@ -148,7 +148,7 @@ void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_f32m1_m(
@@ -157,7 +157,7 @@ void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_f32m2_m(
@@ -166,7 +166,7 @@ void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_f32m4_m(
@@ -175,7 +175,7 @@ void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_f32m8_m(
@@ -184,7 +184,7 @@ void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32mf2_m(
@@ -193,7 +193,7 @@ void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32m1_m(
@@ -202,7 +202,7 @@ void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32m2_m(
@@ -211,7 +211,7 @@ void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32m4_m(
@@ -220,7 +220,7 @@ void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_i32m8_m(
@@ -229,7 +229,7 @@ void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32mf2_m(
@@ -238,7 +238,7 @@ void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32m1_m(
@@ -247,7 +247,7 @@ void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32m2_m(
@@ -256,7 +256,7 @@ void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32m4_m(
@@ -265,7 +265,7 @@ void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse32_v_u32m8_m(
@@ -274,6 +274,6 @@ void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vui
// CHECK-RV64-NEXT: ret void
//
void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) {
- return vsse32(mask, base, bstride, value, vl);
+ return __riscv_vsse32(mask, base, bstride, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse64.c
index 11f42653d6de..a8aef5ac0fbc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_f64m2(
@@ -22,7 +22,7 @@ void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_f64m4(
@@ -31,7 +31,7 @@ void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_f64m8(
@@ -40,7 +40,7 @@ void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_i64m1(
@@ -49,7 +49,7 @@ void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_i64m2(
@@ -58,7 +58,7 @@ void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_i64m4(
@@ -67,7 +67,7 @@ void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_i64m8(
@@ -76,7 +76,7 @@ void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_u64m1(
@@ -85,7 +85,7 @@ void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_u64m2(
@@ -94,7 +94,7 @@ void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_u64m4(
@@ -103,7 +103,7 @@ void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_u64m8(
@@ -112,7 +112,7 @@ void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) {
- return vsse64(base, bstride, value, vl);
+ return __riscv_vsse64(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_f64m1_m(
@@ -121,7 +121,7 @@ void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_f64m2_m(
@@ -130,7 +130,7 @@ void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_f64m4_m(
@@ -139,7 +139,7 @@ void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_f64m8_m(
@@ -148,7 +148,7 @@ void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_i64m1_m(
@@ -157,7 +157,7 @@ void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_i64m2_m(
@@ -166,7 +166,7 @@ void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_i64m4_m(
@@ -175,7 +175,7 @@ void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_i64m8_m(
@@ -184,7 +184,7 @@ void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_u64m1_m(
@@ -193,7 +193,7 @@ void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_u64m2_m(
@@ -202,7 +202,7 @@ void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_u64m4_m(
@@ -211,7 +211,7 @@ void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse64_v_u64m8_m(
@@ -220,6 +220,6 @@ void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) {
- return vsse64(mask, base, bstride, value, vl);
+ return __riscv_vsse64(mask, base, bstride, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse8.c
index 7a989617acd3..3eb0d4f5106e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsse8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8m2(
@@ -48,7 +48,7 @@ void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8m4(
@@ -57,7 +57,7 @@ void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8m8(
@@ -66,7 +66,7 @@ void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8mf8(
@@ -75,7 +75,7 @@ void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8mf4(
@@ -84,7 +84,7 @@ void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8mf2(
@@ -93,7 +93,7 @@ void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8m1(
@@ -102,7 +102,7 @@ void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8m2(
@@ -111,7 +111,7 @@ void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8m4(
@@ -120,7 +120,7 @@ void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8m8(
@@ -129,7 +129,7 @@ void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) {
- return vsse8(base, bstride, value, vl);
+ return __riscv_vsse8(base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8mf8_m(
@@ -138,7 +138,7 @@ void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8mf4_m(
@@ -147,7 +147,7 @@ void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8mf2_m(
@@ -156,7 +156,7 @@ void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8m1_m(
@@ -165,7 +165,7 @@ void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8m2_m(
@@ -174,7 +174,7 @@ void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8m4_m(
@@ -183,7 +183,7 @@ void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_i8m8_m(
@@ -192,7 +192,7 @@ void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8mf8_m(
@@ -201,7 +201,7 @@ void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8mf4_m(
@@ -210,7 +210,7 @@ void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8mf2_m(
@@ -219,7 +219,7 @@ void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8m1_m(
@@ -228,7 +228,7 @@ void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8m2_m(
@@ -237,7 +237,7 @@ void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8m4_m(
@@ -246,7 +246,7 @@ void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
// CHECK-RV64-LABEL: @test_vsse8_v_u8m8_m(
@@ -255,6 +255,6 @@ void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) {
- return vsse8(mask, base, bstride, value, vl);
+ return __riscv_vsse8(mask, base, bstride, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e16.c
index c2f76dfc9723..ac2abb88a37a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsseg2e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsseg2e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsseg2e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, si
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsseg2e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, si
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16m4(_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf4(
@@ -58,7 +58,7 @@ void test_vsseg2e16_v_f16m4(_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, si
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf2(
@@ -67,7 +67,7 @@ void test_vsseg2e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m1(
@@ -76,7 +76,7 @@ void test_vsseg2e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m2(
@@ -85,7 +85,7 @@ void test_vsseg2e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m4(
@@ -94,7 +94,7 @@ void test_vsseg2e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16m4(int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf4(
@@ -103,7 +103,7 @@ void test_vsseg2e16_v_i16m4(int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf2(
@@ -112,7 +112,7 @@ void test_vsseg2e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, s
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m1(
@@ -121,7 +121,7 @@ void test_vsseg2e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, s
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m2(
@@ -130,7 +130,7 @@ void test_vsseg2e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m4(
@@ -139,7 +139,7 @@ void test_vsseg2e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16m4(uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsseg2e16(base, v0, v1, vl);
+ return __riscv_vsseg2e16(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4_m(
@@ -148,7 +148,7 @@ void test_vsseg2e16_v_u16m4(uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2_m(
@@ -157,7 +157,7 @@ void test_vsseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1_m(
@@ -166,7 +166,7 @@ void test_vsseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2_m(
@@ -175,7 +175,7 @@ void test_vsseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4_m(
@@ -184,7 +184,7 @@ void test_vsseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf4_m(
@@ -193,7 +193,7 @@ void test_vsseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf2_m(
@@ -202,7 +202,7 @@ void test_vsseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m1_m(
@@ -211,7 +211,7 @@ void test_vsseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m2_m(
@@ -220,7 +220,7 @@ void test_vsseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m4_m(
@@ -229,7 +229,7 @@ void test_vsseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint1
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf4_m(
@@ -238,7 +238,7 @@ void test_vsseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t v0, vint1
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf2_m(
@@ -247,7 +247,7 @@ void test_vsseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m1_m(
@@ -256,7 +256,7 @@ void test_vsseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m2_m(
@@ -265,7 +265,7 @@ void test_vsseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m4_m(
@@ -274,6 +274,6 @@ void test_vsseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vui
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsseg2e16(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e16(mask, base, v0, v1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e32.c
index e9c4b0444213..61cc911cd90f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vsseg2e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, si
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m2(
@@ -31,7 +31,7 @@ void test_vsseg2e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m4(
@@ -40,7 +40,7 @@ void test_vsseg2e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_f32m4(float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_i32mf2(
@@ -49,7 +49,7 @@ void test_vsseg2e32_v_f32m4(float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m1(
@@ -58,7 +58,7 @@ void test_vsseg2e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m2(
@@ -67,7 +67,7 @@ void test_vsseg2e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m4(
@@ -76,7 +76,7 @@ void test_vsseg2e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_i32m4(int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32mf2(
@@ -85,7 +85,7 @@ void test_vsseg2e32_v_i32m4(int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m1(
@@ -94,7 +94,7 @@ void test_vsseg2e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, s
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m2(
@@ -103,7 +103,7 @@ void test_vsseg2e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m4(
@@ -112,7 +112,7 @@ void test_vsseg2e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_u32m4(uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsseg2e32(base, v0, v1, vl);
+ return __riscv_vsseg2e32(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_f32mf2_m(
@@ -121,7 +121,7 @@ void test_vsseg2e32_v_u32m4(uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m1_m(
@@ -130,7 +130,7 @@ void test_vsseg2e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m2_m(
@@ -139,7 +139,7 @@ void test_vsseg2e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m4_m(
@@ -148,7 +148,7 @@ void test_vsseg2e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_i32mf2_m(
@@ -157,7 +157,7 @@ void test_vsseg2e32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t v0, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m1_m(
@@ -166,7 +166,7 @@ void test_vsseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m2_m(
@@ -175,7 +175,7 @@ void test_vsseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m4_m(
@@ -184,7 +184,7 @@ void test_vsseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32mf2_m(
@@ -193,7 +193,7 @@ void test_vsseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t v0, vint3
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m1_m(
@@ -202,7 +202,7 @@ void test_vsseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m2_m(
@@ -211,7 +211,7 @@ void test_vsseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m4_m(
@@ -220,6 +220,6 @@ void test_vsseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsseg2e32(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e32(mask, base, v0, v1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e64.c
index 328d1c2c08dd..8dc449161b13 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsseg2e64(base, v0, v1, vl);
+ return __riscv_vsseg2e64(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m2(
@@ -22,7 +22,7 @@ void test_vsseg2e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsseg2e64(base, v0, v1, vl);
+ return __riscv_vsseg2e64(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m4(
@@ -31,7 +31,7 @@ void test_vsseg2e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_f64m4(double *base, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsseg2e64(base, v0, v1, vl);
+ return __riscv_vsseg2e64(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m1(
@@ -40,7 +40,7 @@ void test_vsseg2e64_v_f64m4(double *base, vfloat64m4_t v0, vfloat64m4_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsseg2e64(base, v0, v1, vl);
+ return __riscv_vsseg2e64(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m2(
@@ -49,7 +49,7 @@ void test_vsseg2e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsseg2e64(base, v0, v1, vl);
+ return __riscv_vsseg2e64(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m4(
@@ -58,7 +58,7 @@ void test_vsseg2e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_i64m4(int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsseg2e64(base, v0, v1, vl);
+ return __riscv_vsseg2e64(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m1(
@@ -67,7 +67,7 @@ void test_vsseg2e64_v_i64m4(int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsseg2e64(base, v0, v1, vl);
+ return __riscv_vsseg2e64(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m2(
@@ -76,7 +76,7 @@ void test_vsseg2e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsseg2e64(base, v0, v1, vl);
+ return __riscv_vsseg2e64(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m4(
@@ -85,7 +85,7 @@ void test_vsseg2e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_u64m4(uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsseg2e64(base, v0, v1, vl);
+ return __riscv_vsseg2e64(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m1_m(
@@ -94,7 +94,7 @@ void test_vsseg2e64_v_u64m4(uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsseg2e64(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e64(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m2_m(
@@ -103,7 +103,7 @@ void test_vsseg2e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsseg2e64(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e64(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m4_m(
@@ -112,7 +112,7 @@ void test_vsseg2e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsseg2e64(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e64(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m1_m(
@@ -121,7 +121,7 @@ void test_vsseg2e64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsseg2e64(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e64(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m2_m(
@@ -130,7 +130,7 @@ void test_vsseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsseg2e64(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e64(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m4_m(
@@ -139,7 +139,7 @@ void test_vsseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsseg2e64(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e64(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m1_m(
@@ -148,7 +148,7 @@ void test_vsseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsseg2e64(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e64(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m2_m(
@@ -157,7 +157,7 @@ void test_vsseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsseg2e64(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e64(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m4_m(
@@ -166,6 +166,6 @@ void test_vsseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsseg2e64(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e64(mask, base, v0, v1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e8.c
index 07e61774c226..320136add3cc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vsseg2e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vsseg2e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vsseg2e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m2(
@@ -48,7 +48,7 @@ void test_vsseg2e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m4(
@@ -57,7 +57,7 @@ void test_vsseg2e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8m4(int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf8(
@@ -66,7 +66,7 @@ void test_vsseg2e8_v_i8m4(int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) {
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf4(
@@ -75,7 +75,7 @@ void test_vsseg2e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf2(
@@ -84,7 +84,7 @@ void test_vsseg2e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m1(
@@ -93,7 +93,7 @@ void test_vsseg2e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m2(
@@ -102,7 +102,7 @@ void test_vsseg2e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m4(
@@ -111,7 +111,7 @@ void test_vsseg2e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8m4(uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsseg2e8(base, v0, v1, vl);
+ return __riscv_vsseg2e8(base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf8_m(
@@ -120,7 +120,7 @@ void test_vsseg2e8_v_u8m4(uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf4_m(
@@ -129,7 +129,7 @@ void test_vsseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf2_m(
@@ -138,7 +138,7 @@ void test_vsseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m1_m(
@@ -147,7 +147,7 @@ void test_vsseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m2_m(
@@ -156,7 +156,7 @@ void test_vsseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m4_m(
@@ -165,7 +165,7 @@ void test_vsseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf8_m(
@@ -174,7 +174,7 @@ void test_vsseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t v0, vint8m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf4_m(
@@ -183,7 +183,7 @@ void test_vsseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf2_m(
@@ -192,7 +192,7 @@ void test_vsseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m1_m(
@@ -201,7 +201,7 @@ void test_vsseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m2_m(
@@ -210,7 +210,7 @@ void test_vsseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m4_m(
@@ -219,6 +219,6 @@ void test_vsseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg2e8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsseg2e8(mask, base, v0, v1, vl);
+ return __riscv_vsseg2e8(mask, base, v0, v1, vl);
}
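The hunks above complete the vsseg2e8.c updates: each overloaded segment-store call keeps its argument list unchanged and only gains the `__riscv_` prefix. For orientation only — this sketch is not part of the patch — caller code migrates the same way. It assumes an RV64 toolchain with the V extension enabled (e.g. `-march=rv64gcv`), the post-patch `<riscv_vector.h>`, and the separate-register segment-store interface shown in these tests:

```c
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Interleave two int8 arrays into dst as {a[0], b[0], a[1], b[1], ...}
// using the overloaded 2-field segment store. Before this patch the call
// was spelled vsseg2e8(...); afterwards it is __riscv_vsseg2e8(...).
void interleave2_i8(int8_t *dst, const int8_t *a, const int8_t *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);          // elements this strip
    vint8m1_t va = __riscv_vle8_v_i8m1(a + i, vl);   // load field 0
    vint8m1_t vb = __riscv_vle8_v_i8m1(b + i, vl);   // load field 1
    __riscv_vsseg2e8(dst + 2 * i, va, vb, vl);       // was: vsseg2e8(...)
    i += vl;
  }
}
```

The masked overloads migrate identically: the mask remains the leading argument, exactly as in the `_m` test functions above.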
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e16.c
index fc66787c7921..475d605ae8f6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsseg3e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsseg3e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsseg3e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf4(
@@ -49,7 +49,7 @@ void test_vsseg3e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf2(
@@ -58,7 +58,7 @@ void test_vsseg3e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m1(
@@ -67,7 +67,7 @@ void test_vsseg3e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m2(
@@ -76,7 +76,7 @@ void test_vsseg3e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf4(
@@ -85,7 +85,7 @@ void test_vsseg3e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf2(
@@ -94,7 +94,7 @@ void test_vsseg3e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m1(
@@ -103,7 +103,7 @@ void test_vsseg3e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m2(
@@ -112,7 +112,7 @@ void test_vsseg3e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsseg3e16(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4_m(
@@ -121,7 +121,7 @@ void test_vsseg3e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2_m(
@@ -130,7 +130,7 @@ void test_vsseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1_m(
@@ -139,7 +139,7 @@ void test_vsseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2_m(
@@ -148,7 +148,7 @@ void test_vsseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf4_m(
@@ -157,7 +157,7 @@ void test_vsseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf2_m(
@@ -166,7 +166,7 @@ void test_vsseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m1_m(
@@ -175,7 +175,7 @@ void test_vsseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m2_m(
@@ -184,7 +184,7 @@ void test_vsseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf4_m(
@@ -193,7 +193,7 @@ void test_vsseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint1
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf2_m(
@@ -202,7 +202,7 @@ void test_vsseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m1_m(
@@ -211,7 +211,7 @@ void test_vsseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m2_m(
@@ -220,6 +220,6 @@ void test_vsseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsseg3e16(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e16(mask, base, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e32.c
index ef45fe315389..32bf8ec26b5b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsseg3e32(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vsseg3e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsseg3e32(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m2(
@@ -31,7 +31,7 @@ void test_vsseg3e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsseg3e32(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_i32mf2(
@@ -40,7 +40,7 @@ void test_vsseg3e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsseg3e32(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m1(
@@ -49,7 +49,7 @@ void test_vsseg3e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsseg3e32(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m2(
@@ -58,7 +58,7 @@ void test_vsseg3e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsseg3e32(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_u32mf2(
@@ -67,7 +67,7 @@ void test_vsseg3e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsseg3e32(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m1(
@@ -76,7 +76,7 @@ void test_vsseg3e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsseg3e32(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m2(
@@ -85,7 +85,7 @@ void test_vsseg3e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsseg3e32(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_f32mf2_m(
@@ -94,7 +94,7 @@ void test_vsseg3e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsseg3e32(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m1_m(
@@ -103,7 +103,7 @@ void test_vsseg3e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsseg3e32(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m2_m(
@@ -112,7 +112,7 @@ void test_vsseg3e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsseg3e32(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_i32mf2_m(
@@ -121,7 +121,7 @@ void test_vsseg3e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsseg3e32(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m1_m(
@@ -130,7 +130,7 @@ void test_vsseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsseg3e32(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m2_m(
@@ -139,7 +139,7 @@ void test_vsseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsseg3e32(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_u32mf2_m(
@@ -148,7 +148,7 @@ void test_vsseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsseg3e32(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m1_m(
@@ -157,7 +157,7 @@ void test_vsseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsseg3e32(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m2_m(
@@ -166,6 +166,6 @@ void test_vsseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsseg3e32(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e32(mask, base, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e64.c
index d5db45570c85..b8c95e260d12 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsseg3e64(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m2(
@@ -22,7 +22,7 @@ void test_vsseg3e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsseg3e64(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m1(
@@ -31,7 +31,7 @@ void test_vsseg3e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsseg3e64(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m2(
@@ -40,7 +40,7 @@ void test_vsseg3e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsseg3e64(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m1(
@@ -49,7 +49,7 @@ void test_vsseg3e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsseg3e64(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m2(
@@ -58,7 +58,7 @@ void test_vsseg3e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsseg3e64(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m1_m(
@@ -67,7 +67,7 @@ void test_vsseg3e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsseg3e64(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m2_m(
@@ -76,7 +76,7 @@ void test_vsseg3e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsseg3e64(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m1_m(
@@ -85,7 +85,7 @@ void test_vsseg3e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsseg3e64(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m2_m(
@@ -94,7 +94,7 @@ void test_vsseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsseg3e64(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m1_m(
@@ -103,7 +103,7 @@ void test_vsseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsseg3e64(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m2_m(
@@ -112,6 +112,6 @@ void test_vsseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsseg3e64(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e64(mask, base, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e8.c
index e5feedb8060e..226aa5817b79 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg3e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vsseg3e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vsseg3e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vsseg3e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m2(
@@ -48,7 +48,7 @@ void test_vsseg3e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf8(
@@ -57,7 +57,7 @@ void test_vsseg3e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf4(
@@ -66,7 +66,7 @@ void test_vsseg3e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf2(
@@ -75,7 +75,7 @@ void test_vsseg3e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m1(
@@ -84,7 +84,7 @@ void test_vsseg3e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m2(
@@ -93,7 +93,7 @@ void test_vsseg3e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsseg3e8(base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf8_m(
@@ -102,7 +102,7 @@ void test_vsseg3e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf4_m(
@@ -111,7 +111,7 @@ void test_vsseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf2_m(
@@ -120,7 +120,7 @@ void test_vsseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m1_m(
@@ -129,7 +129,7 @@ void test_vsseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m2_m(
@@ -138,7 +138,7 @@ void test_vsseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf8_m(
@@ -147,7 +147,7 @@ void test_vsseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf4_m(
@@ -156,7 +156,7 @@ void test_vsseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf2_m(
@@ -165,7 +165,7 @@ void test_vsseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m1_m(
@@ -174,7 +174,7 @@ void test_vsseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m2_m(
@@ -183,6 +183,6 @@ void test_vsseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg3e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsseg3e8(mask, base, v0, v1, v2, vl);
+ return __riscv_vsseg3e8(mask, base, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e16.c
index e3b9af615399..56c2c0fc7918 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsseg4e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsseg4e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsseg4e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf4(
@@ -49,7 +49,7 @@ void test_vsseg4e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf2(
@@ -58,7 +58,7 @@ void test_vsseg4e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m1(
@@ -67,7 +67,7 @@ void test_vsseg4e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m2(
@@ -76,7 +76,7 @@ void test_vsseg4e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf4(
@@ -85,7 +85,7 @@ void test_vsseg4e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf2(
@@ -94,7 +94,7 @@ void test_vsseg4e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m1(
@@ -103,7 +103,7 @@ void test_vsseg4e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m2(
@@ -112,7 +112,7 @@ void test_vsseg4e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsseg4e16(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4_m(
@@ -121,7 +121,7 @@ void test_vsseg4e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2_m(
@@ -130,7 +130,7 @@ void test_vsseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1_m(
@@ -139,7 +139,7 @@ void test_vsseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2_m(
@@ -148,7 +148,7 @@ void test_vsseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf4_m(
@@ -157,7 +157,7 @@ void test_vsseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf2_m(
@@ -166,7 +166,7 @@ void test_vsseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m1_m(
@@ -175,7 +175,7 @@ void test_vsseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m2_m(
@@ -184,7 +184,7 @@ void test_vsseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf4_m(
@@ -193,7 +193,7 @@ void test_vsseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint1
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf2_m(
@@ -202,7 +202,7 @@ void test_vsseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m1_m(
@@ -211,7 +211,7 @@ void test_vsseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m2_m(
@@ -220,6 +220,6 @@ void test_vsseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsseg4e16(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e16(mask, base, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e32.c
index 767b004c49e4..62e49fdb91ed 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsseg4e32(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vsseg4e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsseg4e32(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m2(
@@ -31,7 +31,7 @@ void test_vsseg4e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsseg4e32(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_i32mf2(
@@ -40,7 +40,7 @@ void test_vsseg4e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsseg4e32(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m1(
@@ -49,7 +49,7 @@ void test_vsseg4e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsseg4e32(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m2(
@@ -58,7 +58,7 @@ void test_vsseg4e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsseg4e32(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_u32mf2(
@@ -67,7 +67,7 @@ void test_vsseg4e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsseg4e32(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m1(
@@ -76,7 +76,7 @@ void test_vsseg4e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsseg4e32(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m2(
@@ -85,7 +85,7 @@ void test_vsseg4e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsseg4e32(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_f32mf2_m(
@@ -94,7 +94,7 @@ void test_vsseg4e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsseg4e32(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m1_m(
@@ -103,7 +103,7 @@ void test_vsseg4e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsseg4e32(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m2_m(
@@ -112,7 +112,7 @@ void test_vsseg4e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsseg4e32(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_i32mf2_m(
@@ -121,7 +121,7 @@ void test_vsseg4e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsseg4e32(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m1_m(
@@ -130,7 +130,7 @@ void test_vsseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsseg4e32(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m2_m(
@@ -139,7 +139,7 @@ void test_vsseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsseg4e32(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_u32mf2_m(
@@ -148,7 +148,7 @@ void test_vsseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsseg4e32(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m1_m(
@@ -157,7 +157,7 @@ void test_vsseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsseg4e32(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m2_m(
@@ -166,6 +166,6 @@ void test_vsseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsseg4e32(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e32(mask, base, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e64.c
index 87b3481c36db..fd282aa9cbe7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsseg4e64(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m2(
@@ -22,7 +22,7 @@ void test_vsseg4e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsseg4e64(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m1(
@@ -31,7 +31,7 @@ void test_vsseg4e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsseg4e64(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m2(
@@ -40,7 +40,7 @@ void test_vsseg4e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsseg4e64(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m1(
@@ -49,7 +49,7 @@ void test_vsseg4e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsseg4e64(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m2(
@@ -58,7 +58,7 @@ void test_vsseg4e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsseg4e64(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m1_m(
@@ -67,7 +67,7 @@ void test_vsseg4e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsseg4e64(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m2_m(
@@ -76,7 +76,7 @@ void test_vsseg4e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsseg4e64(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m1_m(
@@ -85,7 +85,7 @@ void test_vsseg4e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsseg4e64(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m2_m(
@@ -94,7 +94,7 @@ void test_vsseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsseg4e64(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m1_m(
@@ -103,7 +103,7 @@ void test_vsseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsseg4e64(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m2_m(
@@ -112,6 +112,6 @@ void test_vsseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsseg4e64(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e64(mask, base, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e8.c
index 9799da5dcd0a..658231bbbfde 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg4e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vsseg4e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vsseg4e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vsseg4e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m2(
@@ -48,7 +48,7 @@ void test_vsseg4e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf8(
@@ -57,7 +57,7 @@ void test_vsseg4e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf4(
@@ -66,7 +66,7 @@ void test_vsseg4e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf2(
@@ -75,7 +75,7 @@ void test_vsseg4e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m1(
@@ -84,7 +84,7 @@ void test_vsseg4e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m2(
@@ -93,7 +93,7 @@ void test_vsseg4e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsseg4e8(base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf8_m(
@@ -102,7 +102,7 @@ void test_vsseg4e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf4_m(
@@ -111,7 +111,7 @@ void test_vsseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf2_m(
@@ -120,7 +120,7 @@ void test_vsseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m1_m(
@@ -129,7 +129,7 @@ void test_vsseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m2_m(
@@ -138,7 +138,7 @@ void test_vsseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf8_m(
@@ -147,7 +147,7 @@ void test_vsseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf4_m(
@@ -156,7 +156,7 @@ void test_vsseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf2_m(
@@ -165,7 +165,7 @@ void test_vsseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m1_m(
@@ -174,7 +174,7 @@ void test_vsseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m2_m(
@@ -183,6 +183,6 @@ void test_vsseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg4e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsseg4e8(mask, base, v0, v1, v2, v3, vl);
+ return __riscv_vsseg4e8(mask, base, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e16.c
index 61ed86b77f4f..c50a761cae54 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsseg5e16(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsseg5e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsseg5e16(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsseg5e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsseg5e16(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf4(
@@ -40,7 +40,7 @@ void test_vsseg5e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsseg5e16(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf2(
@@ -49,7 +49,7 @@ void test_vsseg5e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsseg5e16(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_i16m1(
@@ -58,7 +58,7 @@ void test_vsseg5e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsseg5e16(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf4(
@@ -67,7 +67,7 @@ void test_vsseg5e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsseg5e16(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf2(
@@ -76,7 +76,7 @@ void test_vsseg5e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsseg5e16(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_u16m1(
@@ -85,7 +85,7 @@ void test_vsseg5e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsseg5e16(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4_m(
@@ -94,7 +94,7 @@ void test_vsseg5e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2_m(
@@ -103,7 +103,7 @@ void test_vsseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1_m(
@@ -112,7 +112,7 @@ void test_vsseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf4_m(
@@ -121,7 +121,7 @@ void test_vsseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf2_m(
@@ -130,7 +130,7 @@ void test_vsseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_i16m1_m(
@@ -139,7 +139,7 @@ void test_vsseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf4_m(
@@ -148,7 +148,7 @@ void test_vsseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf2_m(
@@ -157,7 +157,7 @@ void test_vsseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e16_v_u16m1_m(
@@ -166,6 +166,6 @@ void test_vsseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e32.c
index bd686df78fc5..4fad266a281d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsseg5e32(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vsseg5e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsseg5e32(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_i32mf2(
@@ -31,7 +31,7 @@ void test_vsseg5e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsseg5e32(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_i32m1(
@@ -40,7 +40,7 @@ void test_vsseg5e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsseg5e32(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_u32mf2(
@@ -49,7 +49,7 @@ void test_vsseg5e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsseg5e32(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_u32m1(
@@ -58,7 +58,7 @@ void test_vsseg5e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsseg5e32(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_f32mf2_m(
@@ -67,7 +67,7 @@ void test_vsseg5e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_f32m1_m(
@@ -76,7 +76,7 @@ void test_vsseg5e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_i32mf2_m(
@@ -85,7 +85,7 @@ void test_vsseg5e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_i32m1_m(
@@ -94,7 +94,7 @@ void test_vsseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_u32mf2_m(
@@ -103,7 +103,7 @@ void test_vsseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e32_v_u32m1_m(
@@ -112,6 +112,6 @@ void test_vsseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e64.c
index 3a21d3fde285..0172189a47e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsseg5e64(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e64(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e64_v_i64m1(
@@ -22,7 +22,7 @@ void test_vsseg5e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsseg5e64(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e64(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e64_v_u64m1(
@@ -31,7 +31,7 @@ void test_vsseg5e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsseg5e64(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e64(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e64_v_f64m1_m(
@@ -40,7 +40,7 @@ void test_vsseg5e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsseg5e64(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e64(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vsseg5e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsseg5e64(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e64(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e64_v_u64m1_m(
@@ -58,6 +58,6 @@ void test_vsseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsseg5e64(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e64(mask, base, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e8.c
index a7235ac88b66..68b04426e232 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg5e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsseg5e8(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vsseg5e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsseg5e8(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vsseg5e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsseg5e8(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vsseg5e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsseg5e8(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf8(
@@ -48,7 +48,7 @@ void test_vsseg5e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsseg5e8(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf4(
@@ -57,7 +57,7 @@ void test_vsseg5e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsseg5e8(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf2(
@@ -66,7 +66,7 @@ void test_vsseg5e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsseg5e8(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_u8m1(
@@ -75,7 +75,7 @@ void test_vsseg5e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsseg5e8(base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf8_m(
@@ -84,7 +84,7 @@ void test_vsseg5e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf4_m(
@@ -93,7 +93,7 @@ void test_vsseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf2_m(
@@ -102,7 +102,7 @@ void test_vsseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_i8m1_m(
@@ -111,7 +111,7 @@ void test_vsseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf8_m(
@@ -120,7 +120,7 @@ void test_vsseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vsseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf2_m(
@@ -138,7 +138,7 @@ void test_vsseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsseg5e8_v_u8m1_m(
@@ -147,6 +147,6 @@ void test_vsseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg5e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e16.c
index f659242c5bb5..d702e2c9ddc3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsseg6e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsseg6e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf4(
@@ -40,7 +40,7 @@ void test_vsseg6e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf2(
@@ -49,7 +49,7 @@ void test_vsseg6e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_i16m1(
@@ -58,7 +58,7 @@ void test_vsseg6e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf4(
@@ -67,7 +67,7 @@ void test_vsseg6e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf2(
@@ -76,7 +76,7 @@ void test_vsseg6e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_u16m1(
@@ -85,7 +85,7 @@ void test_vsseg6e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4_m(
@@ -94,7 +94,7 @@ void test_vsseg6e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2_m(
@@ -103,7 +103,7 @@ void test_vsseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1_m(
@@ -112,7 +112,7 @@ void test_vsseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf4_m(
@@ -121,7 +121,7 @@ void test_vsseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf2_m(
@@ -130,7 +130,7 @@ void test_vsseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_i16m1_m(
@@ -139,7 +139,7 @@ void test_vsseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf4_m(
@@ -148,7 +148,7 @@ void test_vsseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf2_m(
@@ -157,7 +157,7 @@ void test_vsseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e16_v_u16m1_m(
@@ -166,6 +166,6 @@ void test_vsseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e32.c
index 4571c135f7d4..aac19131c6eb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vsseg6e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_i32mf2(
@@ -31,7 +31,7 @@ void test_vsseg6e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_i32m1(
@@ -40,7 +40,7 @@ void test_vsseg6e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_u32mf2(
@@ -49,7 +49,7 @@ void test_vsseg6e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_u32m1(
@@ -58,7 +58,7 @@ void test_vsseg6e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_f32mf2_m(
@@ -67,7 +67,7 @@ void test_vsseg6e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_f32m1_m(
@@ -76,7 +76,7 @@ void test_vsseg6e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_i32mf2_m(
@@ -85,7 +85,7 @@ void test_vsseg6e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_i32m1_m(
@@ -94,7 +94,7 @@ void test_vsseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_u32mf2_m(
@@ -103,7 +103,7 @@ void test_vsseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e32_v_u32m1_m(
@@ -112,6 +112,6 @@ void test_vsseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e64.c
index f0f14d7a301e..7d9aeadc3e71 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsseg6e64(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e64(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e64_v_i64m1(
@@ -22,7 +22,7 @@ void test_vsseg6e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsseg6e64(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e64(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e64_v_u64m1(
@@ -31,7 +31,7 @@ void test_vsseg6e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsseg6e64(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e64(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e64_v_f64m1_m(
@@ -40,7 +40,7 @@ void test_vsseg6e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsseg6e64(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e64(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vsseg6e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsseg6e64(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e64(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e64_v_u64m1_m(
@@ -58,6 +58,6 @@ void test_vsseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsseg6e64(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e64(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e8.c
index 13bc6f8498b5..28850a7582a3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg6e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vsseg6e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vsseg6e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vsseg6e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf8(
@@ -48,7 +48,7 @@ void test_vsseg6e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf4(
@@ -57,7 +57,7 @@ void test_vsseg6e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf2(
@@ -66,7 +66,7 @@ void test_vsseg6e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_u8m1(
@@ -75,7 +75,7 @@ void test_vsseg6e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf8_m(
@@ -84,7 +84,7 @@ void test_vsseg6e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf4_m(
@@ -93,7 +93,7 @@ void test_vsseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf2_m(
@@ -102,7 +102,7 @@ void test_vsseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_i8m1_m(
@@ -111,7 +111,7 @@ void test_vsseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf8_m(
@@ -120,7 +120,7 @@ void test_vsseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vsseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf2_m(
@@ -138,7 +138,7 @@ void test_vsseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsseg6e8_v_u8m1_m(
@@ -147,6 +147,6 @@ void test_vsseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg6e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
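// --- Usage sketch (editor's note, not part of this patch): after the rename,
// the unmasked overload is spelled __riscv_vsseg6e8, with the same argument
// list the tests above exercise. The helper below is hypothetical; it assumes
// the standard __riscv_vsetvl_e8m1 helper and stores six i8m1 registers as one
// interleaved 6-field record stream.
#include <riscv_vector.h>

void store_six_fields(int8_t *out, vint8m1_t f0, vint8m1_t f1, vint8m1_t f2,
                      vint8m1_t f3, vint8m1_t f4, vint8m1_t f5, size_t n) {
  size_t vl = __riscv_vsetvl_e8m1(n);  // lanes processed in this pass
  // Writes f0[i], f1[i], ..., f5[i] contiguously for each i in [0, vl).
  __riscv_vsseg6e8(out, f0, f1, f2, f3, f4, f5, vl);
}
// --- end of editor's note; the patch resumes below.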
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e16.c
index 69ef14aaa1a4..df41390a0551 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsseg7e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsseg7e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf4(
@@ -40,7 +40,7 @@ void test_vsseg7e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf2(
@@ -49,7 +49,7 @@ void test_vsseg7e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_i16m1(
@@ -58,7 +58,7 @@ void test_vsseg7e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf4(
@@ -67,7 +67,7 @@ void test_vsseg7e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf2(
@@ -76,7 +76,7 @@ void test_vsseg7e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_u16m1(
@@ -85,7 +85,7 @@ void test_vsseg7e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4_m(
@@ -94,7 +94,7 @@ void test_vsseg7e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2_m(
@@ -103,7 +103,7 @@ void test_vsseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1_m(
@@ -112,7 +112,7 @@ void test_vsseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf4_m(
@@ -121,7 +121,7 @@ void test_vsseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf2_m(
@@ -130,7 +130,7 @@ void test_vsseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_i16m1_m(
@@ -139,7 +139,7 @@ void test_vsseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf4_m(
@@ -148,7 +148,7 @@ void test_vsseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf2_m(
@@ -157,7 +157,7 @@ void test_vsseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e16_v_u16m1_m(
@@ -166,6 +166,6 @@ void test_vsseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
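// --- Usage sketch (editor's note, not part of this patch): when the first
// argument is a vbool*_t mask, overload resolution selects the masked form of
// __riscv_vsseg7e16, as in the *_m tests above, and only active lanes write
// their record. The caller below is hypothetical; the mask is derived with the
// standard vmsgt.vx compare intrinsic.
#include <riscv_vector.h>

void store_seven_if_positive(int16_t *out, vint16m1_t v0, vint16m1_t v1,
                             vint16m1_t v2, vint16m1_t v3, vint16m1_t v4,
                             vint16m1_t v5, vint16m1_t v6, size_t n) {
  size_t vl = __riscv_vsetvl_e16m1(n);
  // Lane i is active when v0[i] > 0; vint16m1_t comparisons yield vbool16_t.
  vbool16_t m = __riscv_vmsgt_vx_i16m1_b16(v0, 0, vl);
  __riscv_vsseg7e16(m, out, v0, v1, v2, v3, v4, v5, v6, vl);
}
// --- end of editor's note; the patch resumes below.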
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e32.c
index 3ef0035dd0b8..9463d809a6c7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vsseg7e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_i32mf2(
@@ -31,7 +31,7 @@ void test_vsseg7e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_i32m1(
@@ -40,7 +40,7 @@ void test_vsseg7e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_u32mf2(
@@ -49,7 +49,7 @@ void test_vsseg7e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_u32m1(
@@ -58,7 +58,7 @@ void test_vsseg7e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_f32mf2_m(
@@ -67,7 +67,7 @@ void test_vsseg7e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_f32m1_m(
@@ -76,7 +76,7 @@ void test_vsseg7e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_i32mf2_m(
@@ -85,7 +85,7 @@ void test_vsseg7e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_i32m1_m(
@@ -94,7 +94,7 @@ void test_vsseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_u32mf2_m(
@@ -103,7 +103,7 @@ void test_vsseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e32_v_u32m1_m(
@@ -112,6 +112,6 @@ void test_vsseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e64.c
index 2b6a71988fef..7d764faf5d4d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsseg7e64(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e64(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e64_v_i64m1(
@@ -22,7 +22,7 @@ void test_vsseg7e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsseg7e64(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e64(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e64_v_u64m1(
@@ -31,7 +31,7 @@ void test_vsseg7e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsseg7e64(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e64(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e64_v_f64m1_m(
@@ -40,7 +40,7 @@ void test_vsseg7e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsseg7e64(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e64(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vsseg7e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsseg7e64(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e64(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e64_v_u64m1_m(
@@ -58,6 +58,6 @@ void test_vsseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsseg7e64(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e64(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e8.c
index bd1f9d54c29e..b8361a2f7f4f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg7e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vsseg7e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vsseg7e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vsseg7e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf8(
@@ -48,7 +48,7 @@ void test_vsseg7e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf4(
@@ -57,7 +57,7 @@ void test_vsseg7e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf2(
@@ -66,7 +66,7 @@ void test_vsseg7e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_u8m1(
@@ -75,7 +75,7 @@ void test_vsseg7e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf8_m(
@@ -84,7 +84,7 @@ void test_vsseg7e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf4_m(
@@ -93,7 +93,7 @@ void test_vsseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf2_m(
@@ -102,7 +102,7 @@ void test_vsseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_i8m1_m(
@@ -111,7 +111,7 @@ void test_vsseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf8_m(
@@ -120,7 +120,7 @@ void test_vsseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vsseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf2_m(
@@ -138,7 +138,7 @@ void test_vsseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsseg7e8_v_u8m1_m(
@@ -147,6 +147,6 @@ void test_vsseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg7e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e16.c
index 4d5fb75db5d1..1d7fb2711371 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsseg8e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsseg8e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf4(
@@ -40,7 +40,7 @@ void test_vsseg8e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf2(
@@ -49,7 +49,7 @@ void test_vsseg8e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_i16m1(
@@ -58,7 +58,7 @@ void test_vsseg8e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf4(
@@ -67,7 +67,7 @@ void test_vsseg8e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf2(
@@ -76,7 +76,7 @@ void test_vsseg8e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_u16m1(
@@ -85,7 +85,7 @@ void test_vsseg8e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4_m(
@@ -94,7 +94,7 @@ void test_vsseg8e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2_m(
@@ -103,7 +103,7 @@ void test_vsseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1_m(
@@ -112,7 +112,7 @@ void test_vsseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf4_m(
@@ -121,7 +121,7 @@ void test_vsseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf2_m(
@@ -130,7 +130,7 @@ void test_vsseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_i16m1_m(
@@ -139,7 +139,7 @@ void test_vsseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf4_m(
@@ -148,7 +148,7 @@ void test_vsseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf2_m(
@@ -157,7 +157,7 @@ void test_vsseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e16_v_u16m1_m(
@@ -166,6 +166,6 @@ void test_vsseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
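// --- Usage sketch (editor's note, not part of this patch): one overloaded
// name covers every element type of the same EEW, so __riscv_vsseg8e16 serves
// _Float16, int16_t, and uint16_t registers alike; the argument types select
// the variant, as the test matrix above shows. Both callers are hypothetical.
#include <riscv_vector.h>

void store8_i16(int16_t *out, vint16m1_t a, vint16m1_t b, vint16m1_t c,
                vint16m1_t d, vint16m1_t e, vint16m1_t f, vint16m1_t g,
                vint16m1_t h, size_t vl) {
  __riscv_vsseg8e16(out, a, b, c, d, e, f, g, h, vl);  // i16 variant
}

void store8_u16(uint16_t *out, vuint16m1_t a, vuint16m1_t b, vuint16m1_t c,
                vuint16m1_t d, vuint16m1_t e, vuint16m1_t f, vuint16m1_t g,
                vuint16m1_t h, size_t vl) {
  __riscv_vsseg8e16(out, a, b, c, d, e, f, g, h, vl);  // u16 variant
}
// --- end of editor's note; the patch resumes below.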
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e32.c
index 26fa8f837a02..11dd6216e17e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vsseg8e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_i32mf2(
@@ -31,7 +31,7 @@ void test_vsseg8e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_i32m1(
@@ -40,7 +40,7 @@ void test_vsseg8e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_u32mf2(
@@ -49,7 +49,7 @@ void test_vsseg8e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_u32m1(
@@ -58,7 +58,7 @@ void test_vsseg8e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_f32mf2_m(
@@ -67,7 +67,7 @@ void test_vsseg8e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_f32m1_m(
@@ -76,7 +76,7 @@ void test_vsseg8e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_i32mf2_m(
@@ -85,7 +85,7 @@ void test_vsseg8e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_i32m1_m(
@@ -94,7 +94,7 @@ void test_vsseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_u32mf2_m(
@@ -103,7 +103,7 @@ void test_vsseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e32_v_u32m1_m(
@@ -112,6 +112,6 @@ void test_vsseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e64.c
index 065be82ab250..b45065494c8c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsseg8e64(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e64(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e64_v_i64m1(
@@ -22,7 +22,7 @@ void test_vsseg8e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsseg8e64(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e64(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e64_v_u64m1(
@@ -31,7 +31,7 @@ void test_vsseg8e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsseg8e64(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e64(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e64_v_f64m1_m(
@@ -40,7 +40,7 @@ void test_vsseg8e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsseg8e64(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e64(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vsseg8e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsseg8e64(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e64(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e64_v_u64m1_m(
@@ -58,6 +58,6 @@ void test_vsseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsseg8e64(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e64(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e8.c
index f3e8a28ca8a8..eba8fcfb740c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg8e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vsseg8e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vsseg8e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vsseg8e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf8(
@@ -48,7 +48,7 @@ void test_vsseg8e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf4(
@@ -57,7 +57,7 @@ void test_vsseg8e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf2(
@@ -66,7 +66,7 @@ void test_vsseg8e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_u8m1(
@@ -75,7 +75,7 @@ void test_vsseg8e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf8_m(
@@ -84,7 +84,7 @@ void test_vsseg8e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf4_m(
@@ -93,7 +93,7 @@ void test_vsseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf2_m(
@@ -102,7 +102,7 @@ void test_vsseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_i8m1_m(
@@ -111,7 +111,7 @@ void test_vsseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf8_m(
@@ -120,7 +120,7 @@ void test_vsseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vsseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf2_m(
@@ -138,7 +138,7 @@ void test_vsseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsseg8e8_v_u8m1_m(
@@ -147,6 +147,6 @@ void test_vsseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsseg8e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
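// --- Usage sketch (editor's note, not part of this patch): the next file
// switches from segment stores to vssra, the fixed-point scaling right shift,
// which the patch renames the same way. A minimal hypothetical caller for the
// scalar-shift (vx) overload seen in the tests below; rounding follows the
// current vxrm setting at this point in the intrinsics' evolution.
#include <riscv_vector.h>

vint8m1_t scale_down(vint8m1_t acc, size_t frac_bits, size_t vl) {
  // Rounds per vxrm, then arithmetic-shifts each lane right by frac_bits.
  return __riscv_vssra(acc, frac_bits, vl);  // vx form; vv takes a vector shift
}
// --- end of editor's note; the patch resumes below.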
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c
index d1cd6c80d0e9..a4e9b100143b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
- return vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shif
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shif
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shif
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shif
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shif
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shif
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, vl);
}
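The vssra changes above follow the same pattern for both the vector-vector and vector-scalar shift forms, masked and unmasked. A short sketch under the same assumptions (hypothetical caller scale_down, <riscv_vector.h> available; not part of the patch):

#include <stddef.h>
#include <riscv_vector.h>

/* Hypothetical caller, not from the patch: scale a fixed-point
   accumulator down by 4 fractional bits with rounding. */
vint32m1_t scale_down(vint32m1_t acc, size_t n) {
  size_t vl = __riscv_vsetvl_e32m1(n);
  /* Before this series this was spelled vssra(acc, 4, vl); the
     vector-scalar overload takes the shift amount as a size_t,
     exactly as in the tests above. */
  return __riscv_vssra(acc, 4, vl);
}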
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c
index a935c095ad04..dedfc2c39ee8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
- return vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t sh
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, vl);
}
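For reference, a minimal caller sketch of the renamed overloaded intrinsic exercised by the vssrl tests above. This is illustrative only, not part of the patch; the function name shift_masked is hypothetical, and the call shape is copied from the masked tests in this file (compile for a V-extension target, e.g. -march=rv64gcv).

#include <riscv_vector.h>
#include <stddef.h>

// Scaling shift-right-logical on the lanes selected by mask; only the
// __riscv_ prefix differs from the pre-patch spelling
// vssrl(mask, op1, shift, vl).
vuint16m1_t shift_masked(vbool16_t mask, vuint16m1_t op1, size_t shift,
                         size_t vl) {
  return __riscv_vssrl(mask, op1, shift, vl);
}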
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e16.c
index 6a20ce03b12f..2e09f0d00be4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vssseg2e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vssseg2e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vssseg2e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4(
@@ -49,7 +49,7 @@ void test_vssseg2e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16mf4(
@@ -58,7 +58,7 @@ void test_vssseg2e16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16mf2(
@@ -67,7 +67,7 @@ void test_vssseg2e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m1(
@@ -76,7 +76,7 @@ void test_vssseg2e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m2(
@@ -85,7 +85,7 @@ void test_vssseg2e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m4(
@@ -94,7 +94,7 @@ void test_vssseg2e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16mf4(
@@ -103,7 +103,7 @@ void test_vssseg2e16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16mf2(
@@ -112,7 +112,7 @@ void test_vssseg2e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m1(
@@ -121,7 +121,7 @@ void test_vssseg2e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m2(
@@ -130,7 +130,7 @@ void test_vssseg2e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m4(
@@ -139,7 +139,7 @@ void test_vssseg2e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vssseg2e16(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4_m(
@@ -148,7 +148,7 @@ void test_vssseg2e16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2_m(
@@ -157,7 +157,7 @@ void test_vssseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1_m(
@@ -166,7 +166,7 @@ void test_vssseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2_m(
@@ -175,7 +175,7 @@ void test_vssseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4_m(
@@ -184,7 +184,7 @@ void test_vssseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16mf4_m(
@@ -193,7 +193,7 @@ void test_vssseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16mf2_m(
@@ -202,7 +202,7 @@ void test_vssseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m1_m(
@@ -211,7 +211,7 @@ void test_vssseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m2_m(
@@ -220,7 +220,7 @@ void test_vssseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m4_m(
@@ -229,7 +229,7 @@ void test_vssseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16mf4_m(
@@ -238,7 +238,7 @@ void test_vssseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16mf2_m(
@@ -247,7 +247,7 @@ void test_vssseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m1_m(
@@ -256,7 +256,7 @@ void test_vssseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m2_m(
@@ -265,7 +265,7 @@ void test_vssseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m4_m(
@@ -274,6 +274,6 @@ void test_vssseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vssseg2e16(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
}
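Similarly, a hypothetical caller for the two-field strided segment store renamed above; the argument order (base, byte stride, field registers, vl) is taken directly from the tests.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Stores vl two-field segments of 16-bit elements: for segment i, the
// elements v0[i] and v1[i] land contiguously at base + i * bstride bytes.
void store_pairs_e16(int16_t *base, ptrdiff_t bstride,
                     vint16m1_t v0, vint16m1_t v1, size_t vl) {
  __riscv_vssseg2e16(base, bstride, v0, v1, vl);
}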
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32.c
index d8c218225f5f..e9d1372f2585 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vssseg2e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m2(
@@ -31,7 +31,7 @@ void test_vssseg2e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m4(
@@ -40,7 +40,7 @@ void test_vssseg2e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_i32mf2(
@@ -49,7 +49,7 @@ void test_vssseg2e32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m1(
@@ -58,7 +58,7 @@ void test_vssseg2e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m2(
@@ -67,7 +67,7 @@ void test_vssseg2e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m4(
@@ -76,7 +76,7 @@ void test_vssseg2e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_u32mf2(
@@ -85,7 +85,7 @@ void test_vssseg2e32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m1(
@@ -94,7 +94,7 @@ void test_vssseg2e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m2(
@@ -103,7 +103,7 @@ void test_vssseg2e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m4(
@@ -112,7 +112,7 @@ void test_vssseg2e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vssseg2e32(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_f32mf2_m(
@@ -121,7 +121,7 @@ void test_vssseg2e32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m1_m(
@@ -130,7 +130,7 @@ void test_vssseg2e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m2_m(
@@ -139,7 +139,7 @@ void test_vssseg2e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m4_m(
@@ -148,7 +148,7 @@ void test_vssseg2e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_i32mf2_m(
@@ -157,7 +157,7 @@ void test_vssseg2e32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m1_m(
@@ -166,7 +166,7 @@ void test_vssseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m2_m(
@@ -175,7 +175,7 @@ void test_vssseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m4_m(
@@ -184,7 +184,7 @@ void test_vssseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_u32mf2_m(
@@ -193,7 +193,7 @@ void test_vssseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m1_m(
@@ -202,7 +202,7 @@ void test_vssseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m2_m(
@@ -211,7 +211,7 @@ void test_vssseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m4_m(
@@ -220,6 +220,6 @@ void test_vssseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vssseg2e32(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}
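A masked counterpart, sketched against the *_m hunks above; store_pairs_e32_m is a hypothetical name.

#include <riscv_vector.h>
#include <stddef.h>

// Writes only the segments whose mask bit is set; the overloaded call
// resolves on the operand types exactly as it did before the rename.
void store_pairs_e32_m(vbool32_t mask, float *base, ptrdiff_t bstride,
                       vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
  __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
}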
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e64.c
index 1d6f85eb7da1..9489141e81c9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vssseg2e64(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m2(
@@ -22,7 +22,7 @@ void test_vssseg2e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vssseg2e64(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m4(
@@ -31,7 +31,7 @@ void test_vssseg2e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vssseg2e64(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m1(
@@ -40,7 +40,7 @@ void test_vssseg2e64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vssseg2e64(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m2(
@@ -49,7 +49,7 @@ void test_vssseg2e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vssseg2e64(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m4(
@@ -58,7 +58,7 @@ void test_vssseg2e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vssseg2e64(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m1(
@@ -67,7 +67,7 @@ void test_vssseg2e64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vssseg2e64(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m2(
@@ -76,7 +76,7 @@ void test_vssseg2e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vssseg2e64(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m4(
@@ -85,7 +85,7 @@ void test_vssseg2e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vssseg2e64(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m1_m(
@@ -94,7 +94,7 @@ void test_vssseg2e64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vssseg2e64(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m2_m(
@@ -103,7 +103,7 @@ void test_vssseg2e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vssseg2e64(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m4_m(
@@ -112,7 +112,7 @@ void test_vssseg2e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vssseg2e64(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m1_m(
@@ -121,7 +121,7 @@ void test_vssseg2e64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vssseg2e64(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m2_m(
@@ -130,7 +130,7 @@ void test_vssseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vssseg2e64(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m4_m(
@@ -139,7 +139,7 @@ void test_vssseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vssseg2e64(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m1_m(
@@ -148,7 +148,7 @@ void test_vssseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vssseg2e64(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m2_m(
@@ -157,7 +157,7 @@ void test_vssseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vssseg2e64(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m4_m(
@@ -166,6 +166,6 @@ void test_vssseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vssseg2e64(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl);
}
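The 64-bit variant follows the same shape; again a hypothetical caller, not part of the patch.

#include <riscv_vector.h>
#include <stddef.h>

// Two-field segment store of doubles with a byte stride between segments.
void store_pairs_e64(double *base, ptrdiff_t bstride,
                     vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
  __riscv_vssseg2e64(base, bstride, v0, v1, vl);
}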
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e8.c
index 8d046ccdeb35..f7d9f4614e0d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vssseg2e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vssseg2e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vssseg2e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m2(
@@ -48,7 +48,7 @@ void test_vssseg2e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m4(
@@ -57,7 +57,7 @@ void test_vssseg2e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf8(
@@ -66,7 +66,7 @@ void test_vssseg2e8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf4(
@@ -75,7 +75,7 @@ void test_vssseg2e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf2(
@@ -84,7 +84,7 @@ void test_vssseg2e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m1(
@@ -93,7 +93,7 @@ void test_vssseg2e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m2(
@@ -102,7 +102,7 @@ void test_vssseg2e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m4(
@@ -111,7 +111,7 @@ void test_vssseg2e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vssseg2e8(base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf8_m(
@@ -120,7 +120,7 @@ void test_vssseg2e8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf4_m(
@@ -129,7 +129,7 @@ void test_vssseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf2_m(
@@ -138,7 +138,7 @@ void test_vssseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m1_m(
@@ -147,7 +147,7 @@ void test_vssseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m2_m(
@@ -156,7 +156,7 @@ void test_vssseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m4_m(
@@ -165,7 +165,7 @@ void test_vssseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf8_m(
@@ -174,7 +174,7 @@ void test_vssseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf4_m(
@@ -183,7 +183,7 @@ void test_vssseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf2_m(
@@ -192,7 +192,7 @@ void test_vssseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m1_m(
@@ -201,7 +201,7 @@ void test_vssseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m2_m(
@@ -210,7 +210,7 @@ void test_vssseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m4_m(
@@ -219,6 +219,6 @@ void test_vssseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg2e8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vssseg2e8(mask, base, bstride, v0, v1, vl);
+ return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
}
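And the 8-bit variant, with a hypothetical name as above.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Two-field segment store of bytes; uint8_t* matches the u8 tests above.
void store_pairs_e8(uint8_t *base, ptrdiff_t bstride,
                    vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
  __riscv_vssseg2e8(base, bstride, v0, v1, vl);
}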
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e16.c
index 2a1cb7fd9aec..52643f76d54d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vssseg3e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vssseg3e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vssseg3e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_i16mf4(
@@ -49,7 +49,7 @@ void test_vssseg3e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_i16mf2(
@@ -58,7 +58,7 @@ void test_vssseg3e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_i16m1(
@@ -67,7 +67,7 @@ void test_vssseg3e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_i16m2(
@@ -76,7 +76,7 @@ void test_vssseg3e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_u16mf4(
@@ -85,7 +85,7 @@ void test_vssseg3e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_u16mf2(
@@ -94,7 +94,7 @@ void test_vssseg3e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_u16m1(
@@ -103,7 +103,7 @@ void test_vssseg3e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_u16m2(
@@ -112,7 +112,7 @@ void test_vssseg3e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vssseg3e16(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4_m(
@@ -121,7 +121,7 @@ void test_vssseg3e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2_m(
@@ -130,7 +130,7 @@ void test_vssseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1_m(
@@ -139,7 +139,7 @@ void test_vssseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2_m(
@@ -148,7 +148,7 @@ void test_vssseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_i16mf4_m(
@@ -157,7 +157,7 @@ void test_vssseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_i16mf2_m(
@@ -166,7 +166,7 @@ void test_vssseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_i16m1_m(
@@ -175,7 +175,7 @@ void test_vssseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_i16m2_m(
@@ -184,7 +184,7 @@ void test_vssseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_u16mf4_m(
@@ -193,7 +193,7 @@ void test_vssseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_u16mf2_m(
@@ -202,7 +202,7 @@ void test_vssseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_u16m1_m(
@@ -211,7 +211,7 @@ void test_vssseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e16_v_u16m2_m(
@@ -220,6 +220,6 @@ void test_vssseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e32.c
index 871aa60af230..157b892a5608 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vssseg3e32(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vssseg3e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vssseg3e32(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_f32m2(
@@ -31,7 +31,7 @@ void test_vssseg3e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vssseg3e32(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_i32mf2(
@@ -40,7 +40,7 @@ void test_vssseg3e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vssseg3e32(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_i32m1(
@@ -49,7 +49,7 @@ void test_vssseg3e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vssseg3e32(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_i32m2(
@@ -58,7 +58,7 @@ void test_vssseg3e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vssseg3e32(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_u32mf2(
@@ -67,7 +67,7 @@ void test_vssseg3e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vssseg3e32(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_u32m1(
@@ -76,7 +76,7 @@ void test_vssseg3e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vssseg3e32(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_u32m2(
@@ -85,7 +85,7 @@ void test_vssseg3e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vssseg3e32(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_f32mf2_m(
@@ -94,7 +94,7 @@ void test_vssseg3e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_f32m1_m(
@@ -103,7 +103,7 @@ void test_vssseg3e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_f32m2_m(
@@ -112,7 +112,7 @@ void test_vssseg3e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_i32mf2_m(
@@ -121,7 +121,7 @@ void test_vssseg3e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_i32m1_m(
@@ -130,7 +130,7 @@ void test_vssseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_i32m2_m(
@@ -139,7 +139,7 @@ void test_vssseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_u32mf2_m(
@@ -148,7 +148,7 @@ void test_vssseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_u32m1_m(
@@ -157,7 +157,7 @@ void test_vssseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e32_v_u32m2_m(
@@ -166,6 +166,6 @@ void test_vssseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e64.c
index ee9640e807b8..dba932dee7f5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vssseg3e64(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_f64m2(
@@ -22,7 +22,7 @@ void test_vssseg3e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vssseg3e64(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_i64m1(
@@ -31,7 +31,7 @@ void test_vssseg3e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vssseg3e64(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_i64m2(
@@ -40,7 +40,7 @@ void test_vssseg3e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vssseg3e64(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_u64m1(
@@ -49,7 +49,7 @@ void test_vssseg3e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vssseg3e64(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_u64m2(
@@ -58,7 +58,7 @@ void test_vssseg3e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vssseg3e64(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_f64m1_m(
@@ -67,7 +67,7 @@ void test_vssseg3e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_f64m2_m(
@@ -76,7 +76,7 @@ void test_vssseg3e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_i64m1_m(
@@ -85,7 +85,7 @@ void test_vssseg3e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_i64m2_m(
@@ -94,7 +94,7 @@ void test_vssseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_u64m1_m(
@@ -103,7 +103,7 @@ void test_vssseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e64_v_u64m2_m(
@@ -112,6 +112,6 @@ void test_vssseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e8.c
index f43d5ad585b1..9ed8700668fc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vssseg3e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vssseg3e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vssseg3e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_i8m2(
@@ -48,7 +48,7 @@ void test_vssseg3e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf8(
@@ -57,7 +57,7 @@ void test_vssseg3e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf4(
@@ -66,7 +66,7 @@ void test_vssseg3e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf2(
@@ -75,7 +75,7 @@ void test_vssseg3e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8m1(
@@ -84,7 +84,7 @@ void test_vssseg3e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8m2(
@@ -93,7 +93,7 @@ void test_vssseg3e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vssseg3e8(base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf8_m(
@@ -102,7 +102,7 @@ void test_vssseg3e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf4_m(
@@ -111,7 +111,7 @@ void test_vssseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf2_m(
@@ -120,7 +120,7 @@ void test_vssseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_i8m1_m(
@@ -129,7 +129,7 @@ void test_vssseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_i8m2_m(
@@ -138,7 +138,7 @@ void test_vssseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf8_m(
@@ -147,7 +147,7 @@ void test_vssseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf4_m(
@@ -156,7 +156,7 @@ void test_vssseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf2_m(
@@ -165,7 +165,7 @@ void test_vssseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8m1_m(
@@ -174,7 +174,7 @@ void test_vssseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vssseg3e8_v_u8m2_m(
@@ -183,6 +183,6 @@ void test_vssseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg3e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
+ return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl);
}
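(Illustrative note, not part of the patch: the hunks above all follow one pattern, so a minimal standalone sketch of the renamed overloaded API may help. It assumes only <riscv_vector.h> and a toolchain with the V extension, e.g. -march=rv64gcv; the function and variable names here are hypothetical.)

#include <riscv_vector.h>
#include <stddef.h>

// Store three float32 vectors as interleaved 3-element segments, with
// consecutive segments `bstride` bytes apart. As in the tests above, the
// overloaded name now carries the __riscv_ prefix but no type suffix; the
// overload resolves on the operand types (here vfloat32m1_t -> e32).
void store_three_planes(float *base, ptrdiff_t bstride,
                        vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2,
                        size_t vl) {
  __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
}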
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e16.c
index 7bc8f002c7cc..6c75e24778f0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vssseg4e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vssseg4e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vssseg4e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_i16mf4(
@@ -49,7 +49,7 @@ void test_vssseg4e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_i16mf2(
@@ -58,7 +58,7 @@ void test_vssseg4e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_i16m1(
@@ -67,7 +67,7 @@ void test_vssseg4e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_i16m2(
@@ -76,7 +76,7 @@ void test_vssseg4e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_u16mf4(
@@ -85,7 +85,7 @@ void test_vssseg4e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_u16mf2(
@@ -94,7 +94,7 @@ void test_vssseg4e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_u16m1(
@@ -103,7 +103,7 @@ void test_vssseg4e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_u16m2(
@@ -112,7 +112,7 @@ void test_vssseg4e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4_m(
@@ -121,7 +121,7 @@ void test_vssseg4e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2_m(
@@ -130,7 +130,7 @@ void test_vssseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1_m(
@@ -139,7 +139,7 @@ void test_vssseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2_m(
@@ -148,7 +148,7 @@ void test_vssseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_i16mf4_m(
@@ -157,7 +157,7 @@ void test_vssseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_i16mf2_m(
@@ -166,7 +166,7 @@ void test_vssseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_i16m1_m(
@@ -175,7 +175,7 @@ void test_vssseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_i16m2_m(
@@ -184,7 +184,7 @@ void test_vssseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_u16mf4_m(
@@ -193,7 +193,7 @@ void test_vssseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_u16mf2_m(
@@ -202,7 +202,7 @@ void test_vssseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_u16m1_m(
@@ -211,7 +211,7 @@ void test_vssseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e16_v_u16m2_m(
@@ -220,6 +220,6 @@ void test_vssseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e32.c
index 994d75c3460f..b7f5fbaf3d43 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vssseg4e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_f32m2(
@@ -31,7 +31,7 @@ void test_vssseg4e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_i32mf2(
@@ -40,7 +40,7 @@ void test_vssseg4e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_i32m1(
@@ -49,7 +49,7 @@ void test_vssseg4e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_i32m2(
@@ -58,7 +58,7 @@ void test_vssseg4e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_u32mf2(
@@ -67,7 +67,7 @@ void test_vssseg4e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_u32m1(
@@ -76,7 +76,7 @@ void test_vssseg4e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_u32m2(
@@ -85,7 +85,7 @@ void test_vssseg4e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_f32mf2_m(
@@ -94,7 +94,7 @@ void test_vssseg4e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_f32m1_m(
@@ -103,7 +103,7 @@ void test_vssseg4e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_f32m2_m(
@@ -112,7 +112,7 @@ void test_vssseg4e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_i32mf2_m(
@@ -121,7 +121,7 @@ void test_vssseg4e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_i32m1_m(
@@ -130,7 +130,7 @@ void test_vssseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_i32m2_m(
@@ -139,7 +139,7 @@ void test_vssseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_u32mf2_m(
@@ -148,7 +148,7 @@ void test_vssseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_u32m1_m(
@@ -157,7 +157,7 @@ void test_vssseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e32_v_u32m2_m(
@@ -166,6 +166,6 @@ void test_vssseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e64.c
index d1788d34c575..404f048312bd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_f64m2(
@@ -22,7 +22,7 @@ void test_vssseg4e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_i64m1(
@@ -31,7 +31,7 @@ void test_vssseg4e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_i64m2(
@@ -40,7 +40,7 @@ void test_vssseg4e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_u64m1(
@@ -49,7 +49,7 @@ void test_vssseg4e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_u64m2(
@@ -58,7 +58,7 @@ void test_vssseg4e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_f64m1_m(
@@ -67,7 +67,7 @@ void test_vssseg4e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_f64m2_m(
@@ -76,7 +76,7 @@ void test_vssseg4e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_i64m1_m(
@@ -85,7 +85,7 @@ void test_vssseg4e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_i64m2_m(
@@ -94,7 +94,7 @@ void test_vssseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_u64m1_m(
@@ -103,7 +103,7 @@ void test_vssseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e64_v_u64m2_m(
@@ -112,6 +112,6 @@ void test_vssseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
}
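(Illustrative note, not part of the patch: the masked variants above differ from the unmasked ones only in taking the mask as the first argument. A hedged standalone sketch, same assumptions as the earlier note, hypothetical names.)

#include <riscv_vector.h>
#include <stddef.h>

// Masked 4-segment strided store of float64 vectors: lanes with a clear
// mask bit are skipped. The same suffix-free __riscv_vssseg4e64 name is
// used; the vbool64_t first argument selects the masked overload.
void masked_store_four(vbool64_t mask, double *base, ptrdiff_t bstride,
                       vfloat64m1_t v0, vfloat64m1_t v1,
                       vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
  __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl);
}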
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e8.c
index c091f7af22c2..d4c34538f7c6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vssseg4e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vssseg4e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vssseg4e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_i8m2(
@@ -48,7 +48,7 @@ void test_vssseg4e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf8(
@@ -57,7 +57,7 @@ void test_vssseg4e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf4(
@@ -66,7 +66,7 @@ void test_vssseg4e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf2(
@@ -75,7 +75,7 @@ void test_vssseg4e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8m1(
@@ -84,7 +84,7 @@ void test_vssseg4e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8m2(
@@ -93,7 +93,7 @@ void test_vssseg4e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf8_m(
@@ -102,7 +102,7 @@ void test_vssseg4e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf4_m(
@@ -111,7 +111,7 @@ void test_vssseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf2_m(
@@ -120,7 +120,7 @@ void test_vssseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_i8m1_m(
@@ -129,7 +129,7 @@ void test_vssseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_i8m2_m(
@@ -138,7 +138,7 @@ void test_vssseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf8_m(
@@ -147,7 +147,7 @@ void test_vssseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf4_m(
@@ -156,7 +156,7 @@ void test_vssseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf2_m(
@@ -165,7 +165,7 @@ void test_vssseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8m1_m(
@@ -174,7 +174,7 @@ void test_vssseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vssseg4e8_v_u8m2_m(
@@ -183,6 +183,6 @@ void test_vssseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg4e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
+ return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e16.c
index a0139df39fec..0d89f23c6a07 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vssseg5e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vssseg5e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_i16mf4(
@@ -40,7 +40,7 @@ void test_vssseg5e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_i16mf2(
@@ -49,7 +49,7 @@ void test_vssseg5e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_i16m1(
@@ -58,7 +58,7 @@ void test_vssseg5e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_u16mf4(
@@ -67,7 +67,7 @@ void test_vssseg5e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_u16mf2(
@@ -76,7 +76,7 @@ void test_vssseg5e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_u16m1(
@@ -85,7 +85,7 @@ void test_vssseg5e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4_m(
@@ -94,7 +94,7 @@ void test_vssseg5e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2_m(
@@ -103,7 +103,7 @@ void test_vssseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1_m(
@@ -112,7 +112,7 @@ void test_vssseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_i16mf4_m(
@@ -121,7 +121,7 @@ void test_vssseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_i16mf2_m(
@@ -130,7 +130,7 @@ void test_vssseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_i16m1_m(
@@ -139,7 +139,7 @@ void test_vssseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_u16mf4_m(
@@ -148,7 +148,7 @@ void test_vssseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_u16mf2_m(
@@ -157,7 +157,7 @@ void test_vssseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e16_v_u16m1_m(
@@ -166,6 +166,6 @@ void test_vssseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e32.c
index 98325f9762d3..d4ca1ff0b4d6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vssseg5e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_i32mf2(
@@ -31,7 +31,7 @@ void test_vssseg5e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_i32m1(
@@ -40,7 +40,7 @@ void test_vssseg5e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_u32mf2(
@@ -49,7 +49,7 @@ void test_vssseg5e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_u32m1(
@@ -58,7 +58,7 @@ void test_vssseg5e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_f32mf2_m(
@@ -67,7 +67,7 @@ void test_vssseg5e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_f32m1_m(
@@ -76,7 +76,7 @@ void test_vssseg5e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_i32mf2_m(
@@ -85,7 +85,7 @@ void test_vssseg5e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_i32m1_m(
@@ -94,7 +94,7 @@ void test_vssseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_u32mf2_m(
@@ -103,7 +103,7 @@ void test_vssseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e32_v_u32m1_m(
@@ -112,6 +112,6 @@ void test_vssseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e64.c
index 40c97cdafcb9..135405d2eeda 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e64_v_i64m1(
@@ -22,7 +22,7 @@ void test_vssseg5e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e64_v_u64m1(
@@ -31,7 +31,7 @@ void test_vssseg5e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e64_v_f64m1_m(
@@ -40,7 +40,7 @@ void test_vssseg5e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vssseg5e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e64_v_u64m1_m(
@@ -58,6 +58,6 @@ void test_vssseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e8.c
index d59d4c46b11f..bb186b36f0e5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vssseg5e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vssseg5e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vssseg5e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf8(
@@ -48,7 +48,7 @@ void test_vssseg5e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf4(
@@ -57,7 +57,7 @@ void test_vssseg5e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf2(
@@ -66,7 +66,7 @@ void test_vssseg5e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_u8m1(
@@ -75,7 +75,7 @@ void test_vssseg5e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf8_m(
@@ -84,7 +84,7 @@ void test_vssseg5e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf4_m(
@@ -93,7 +93,7 @@ void test_vssseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf2_m(
@@ -102,7 +102,7 @@ void test_vssseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_i8m1_m(
@@ -111,7 +111,7 @@ void test_vssseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf8_m(
@@ -120,7 +120,7 @@ void test_vssseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vssseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf2_m(
@@ -138,7 +138,7 @@ void test_vssseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vssseg5e8_v_u8m1_m(
@@ -147,6 +147,6 @@ void test_vssseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg5e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+ return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl);
}
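The hunks in every file above and below are mechanical, so a minimal sketch (illustrative only, not part of the patch) of how caller code adapts to the rename may help; the function name store_five_fields is hypothetical, the argument types and call mirror the test_vssseg5e8_v_i8m1 case above, and <riscv_vector.h> is the standard RVV intrinsics header.

#include <riscv_vector.h>
#include <stddef.h>

// Stores five segment fields (v0..v4), vl elements each, with byte stride
// bstride. Before this patch the overloaded call was spelled vssseg5e8(...).
void store_five_fields(int8_t *base, ptrdiff_t bstride,
                       vint8m1_t v0, vint8m1_t v1, vint8m1_t v2,
                       vint8m1_t v3, vint8m1_t v4, size_t vl) {
  __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl);
}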
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e16.c
index c830c6f83e9b..9535b4d86006 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vssseg6e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vssseg6e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_i16mf4(
@@ -40,7 +40,7 @@ void test_vssseg6e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_i16mf2(
@@ -49,7 +49,7 @@ void test_vssseg6e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_i16m1(
@@ -58,7 +58,7 @@ void test_vssseg6e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_u16mf4(
@@ -67,7 +67,7 @@ void test_vssseg6e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_u16mf2(
@@ -76,7 +76,7 @@ void test_vssseg6e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_u16m1(
@@ -85,7 +85,7 @@ void test_vssseg6e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4_m(
@@ -94,7 +94,7 @@ void test_vssseg6e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2_m(
@@ -103,7 +103,7 @@ void test_vssseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1_m(
@@ -112,7 +112,7 @@ void test_vssseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_i16mf4_m(
@@ -121,7 +121,7 @@ void test_vssseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_i16mf2_m(
@@ -130,7 +130,7 @@ void test_vssseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_i16m1_m(
@@ -139,7 +139,7 @@ void test_vssseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_u16mf4_m(
@@ -148,7 +148,7 @@ void test_vssseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_u16mf2_m(
@@ -157,7 +157,7 @@ void test_vssseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e16_v_u16m1_m(
@@ -166,6 +166,6 @@ void test_vssseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e32.c
index b58ec0f56ba8..dd14f35dad5d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vssseg6e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_i32mf2(
@@ -31,7 +31,7 @@ void test_vssseg6e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_i32m1(
@@ -40,7 +40,7 @@ void test_vssseg6e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_u32mf2(
@@ -49,7 +49,7 @@ void test_vssseg6e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_u32m1(
@@ -58,7 +58,7 @@ void test_vssseg6e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_f32mf2_m(
@@ -67,7 +67,7 @@ void test_vssseg6e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_f32m1_m(
@@ -76,7 +76,7 @@ void test_vssseg6e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_i32mf2_m(
@@ -85,7 +85,7 @@ void test_vssseg6e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_i32m1_m(
@@ -94,7 +94,7 @@ void test_vssseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_u32mf2_m(
@@ -103,7 +103,7 @@ void test_vssseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e32_v_u32m1_m(
@@ -112,6 +112,6 @@ void test_vssseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c
index 90b0f86ff2f4..01a94d0839fd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e64_v_i64m1(
@@ -22,7 +22,7 @@ void test_vssseg6e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e64_v_u64m1(
@@ -31,7 +31,7 @@ void test_vssseg6e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e64_v_f64m1_m(
@@ -40,7 +40,7 @@ void test_vssseg6e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vssseg6e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e64_v_u64m1_m(
@@ -58,6 +58,6 @@ void test_vssseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e8.c
index 3fbfacac5b07..8e8b3973d01f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vssseg6e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vssseg6e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vssseg6e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf8(
@@ -48,7 +48,7 @@ void test_vssseg6e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf4(
@@ -57,7 +57,7 @@ void test_vssseg6e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf2(
@@ -66,7 +66,7 @@ void test_vssseg6e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_u8m1(
@@ -75,7 +75,7 @@ void test_vssseg6e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf8_m(
@@ -84,7 +84,7 @@ void test_vssseg6e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf4_m(
@@ -93,7 +93,7 @@ void test_vssseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf2_m(
@@ -102,7 +102,7 @@ void test_vssseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_i8m1_m(
@@ -111,7 +111,7 @@ void test_vssseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf8_m(
@@ -120,7 +120,7 @@ void test_vssseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vssseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf2_m(
@@ -138,7 +138,7 @@ void test_vssseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vssseg6e8_v_u8m1_m(
@@ -147,6 +147,6 @@ void test_vssseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg6e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e16.c
index f74d805a803a..91f636542f45 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vssseg7e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vssseg7e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_i16mf4(
@@ -40,7 +40,7 @@ void test_vssseg7e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_i16mf2(
@@ -49,7 +49,7 @@ void test_vssseg7e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_i16m1(
@@ -58,7 +58,7 @@ void test_vssseg7e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_u16mf4(
@@ -67,7 +67,7 @@ void test_vssseg7e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_u16mf2(
@@ -76,7 +76,7 @@ void test_vssseg7e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_u16m1(
@@ -85,7 +85,7 @@ void test_vssseg7e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4_m(
@@ -94,7 +94,7 @@ void test_vssseg7e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2_m(
@@ -103,7 +103,7 @@ void test_vssseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1_m(
@@ -112,7 +112,7 @@ void test_vssseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_i16mf4_m(
@@ -121,7 +121,7 @@ void test_vssseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_i16mf2_m(
@@ -130,7 +130,7 @@ void test_vssseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_i16m1_m(
@@ -139,7 +139,7 @@ void test_vssseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_u16mf4_m(
@@ -148,7 +148,7 @@ void test_vssseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_u16mf2_m(
@@ -157,7 +157,7 @@ void test_vssseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e16_v_u16m1_m(
@@ -166,6 +166,6 @@ void test_vssseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e32.c
index fb87303c173b..f1c993e59e76 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vssseg7e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_i32mf2(
@@ -31,7 +31,7 @@ void test_vssseg7e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_i32m1(
@@ -40,7 +40,7 @@ void test_vssseg7e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_u32mf2(
@@ -49,7 +49,7 @@ void test_vssseg7e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_u32m1(
@@ -58,7 +58,7 @@ void test_vssseg7e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_f32mf2_m(
@@ -67,7 +67,7 @@ void test_vssseg7e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_f32m1_m(
@@ -76,7 +76,7 @@ void test_vssseg7e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_i32mf2_m(
@@ -85,7 +85,7 @@ void test_vssseg7e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_i32m1_m(
@@ -94,7 +94,7 @@ void test_vssseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_u32mf2_m(
@@ -103,7 +103,7 @@ void test_vssseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e32_v_u32m1_m(
@@ -112,6 +112,6 @@ void test_vssseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e64.c
index 3a1f64026ca3..40c24b7a5d46 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e64_v_i64m1(
@@ -22,7 +22,7 @@ void test_vssseg7e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e64_v_u64m1(
@@ -31,7 +31,7 @@ void test_vssseg7e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e64_v_f64m1_m(
@@ -40,7 +40,7 @@ void test_vssseg7e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vssseg7e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e64_v_u64m1_m(
@@ -58,6 +58,6 @@ void test_vssseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
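The hunks above are mechanical renames, so a short standalone sketch may help show what a caller sees after the change. The helper name below is illustrative and not part of the patch; the intrinsic signature is taken verbatim from the tests above.

#include <riscv_vector.h>

// Illustrative caller: stores seven f64 fields per element, one group of
// fields every `bstride` bytes, using the renamed overloaded intrinsic.
// Overload resolution picks the f64m1 variant from the operand types;
// only the spelling changes from vssseg7e64 to __riscv_vssseg7e64.
void store_seven_f64_fields(double *base, ptrdiff_t bstride,
                            vfloat64m1_t f0, vfloat64m1_t f1, vfloat64m1_t f2,
                            vfloat64m1_t f3, vfloat64m1_t f4, vfloat64m1_t f5,
                            vfloat64m1_t f6, size_t vl) {
  __riscv_vssseg7e64(base, bstride, f0, f1, f2, f3, f4, f5, f6, vl);
}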
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e8.c
index 7d70aac82066..005de2819715 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vssseg7e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vssseg7e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vssseg7e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf8(
@@ -48,7 +48,7 @@ void test_vssseg7e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf4(
@@ -57,7 +57,7 @@ void test_vssseg7e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf2(
@@ -66,7 +66,7 @@ void test_vssseg7e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_u8m1(
@@ -75,7 +75,7 @@ void test_vssseg7e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf8_m(
@@ -84,7 +84,7 @@ void test_vssseg7e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf4_m(
@@ -93,7 +93,7 @@ void test_vssseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf2_m(
@@ -102,7 +102,7 @@ void test_vssseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_i8m1_m(
@@ -111,7 +111,7 @@ void test_vssseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf8_m(
@@ -120,7 +120,7 @@ void test_vssseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vssseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf2_m(
@@ -138,7 +138,7 @@ void test_vssseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vssseg7e8_v_u8m1_m(
@@ -147,6 +147,6 @@ void test_vssseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg7e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e16.c
index dd3262e3fd81..2347bd9f88bb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vssseg8e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vssseg8e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_i16mf4(
@@ -40,7 +40,7 @@ void test_vssseg8e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_i16mf2(
@@ -49,7 +49,7 @@ void test_vssseg8e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_i16m1(
@@ -58,7 +58,7 @@ void test_vssseg8e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_u16mf4(
@@ -67,7 +67,7 @@ void test_vssseg8e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_u16mf2(
@@ -76,7 +76,7 @@ void test_vssseg8e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_u16m1(
@@ -85,7 +85,7 @@ void test_vssseg8e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4_m(
@@ -94,7 +94,7 @@ void test_vssseg8e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2_m(
@@ -103,7 +103,7 @@ void test_vssseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1_m(
@@ -112,7 +112,7 @@ void test_vssseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_i16mf4_m(
@@ -121,7 +121,7 @@ void test_vssseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_i16mf2_m(
@@ -130,7 +130,7 @@ void test_vssseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_i16m1_m(
@@ -139,7 +139,7 @@ void test_vssseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_u16mf4_m(
@@ -148,7 +148,7 @@ void test_vssseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_u16mf2_m(
@@ -157,7 +157,7 @@ void test_vssseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e16_v_u16m1_m(
@@ -166,6 +166,6 @@ void test_vssseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e32.c
index e3916b141ba4..a18ca31f3c00 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_f32m1(
@@ -22,7 +22,7 @@ void test_vssseg8e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_i32mf2(
@@ -31,7 +31,7 @@ void test_vssseg8e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_i32m1(
@@ -40,7 +40,7 @@ void test_vssseg8e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_u32mf2(
@@ -49,7 +49,7 @@ void test_vssseg8e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_u32m1(
@@ -58,7 +58,7 @@ void test_vssseg8e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_f32mf2_m(
@@ -67,7 +67,7 @@ void test_vssseg8e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_f32m1_m(
@@ -76,7 +76,7 @@ void test_vssseg8e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_i32mf2_m(
@@ -85,7 +85,7 @@ void test_vssseg8e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_i32m1_m(
@@ -94,7 +94,7 @@ void test_vssseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_u32mf2_m(
@@ -103,7 +103,7 @@ void test_vssseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e32_v_u32m1_m(
@@ -112,6 +112,6 @@ void test_vssseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e64.c
index 65cb51691e9d..b20e8f8bd6e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e64_v_i64m1(
@@ -22,7 +22,7 @@ void test_vssseg8e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e64_v_u64m1(
@@ -31,7 +31,7 @@ void test_vssseg8e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e64_v_f64m1_m(
@@ -40,7 +40,7 @@ void test_vssseg8e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e64_v_i64m1_m(
@@ -49,7 +49,7 @@ void test_vssseg8e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e64_v_u64m1_m(
@@ -58,6 +58,6 @@ void test_vssseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
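Beyond the overloaded spellings exercised by these tests, the same naming guideline also prefixes the explicitly typed, non-overloaded names (handled by the earlier commits of this patch-set). A minimal sketch, assuming the type-suffixed spelling __riscv_vssseg8e64_v_f64m1 follows the same __riscv_ convention; the helper name is illustrative only:

#include <riscv_vector.h>

// Both calls should lower to the same strided segment store: the first
// relies on overload resolution, the second names the f64m1 variant
// explicitly (assumed spelling, per the riscv-c-api-doc guideline).
void store_eight_f64_fields(double *base, ptrdiff_t bstride,
                            vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2,
                            vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5,
                            vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
  __riscv_vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
  __riscv_vssseg8e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}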
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e8.c
index 9070c014f544..980f2d78e934 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf4(
@@ -21,7 +21,7 @@ void test_vssseg8e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf2(
@@ -30,7 +30,7 @@ void test_vssseg8e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_i8m1(
@@ -39,7 +39,7 @@ void test_vssseg8e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf8(
@@ -48,7 +48,7 @@ void test_vssseg8e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf4(
@@ -57,7 +57,7 @@ void test_vssseg8e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf2(
@@ -66,7 +66,7 @@ void test_vssseg8e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_u8m1(
@@ -75,7 +75,7 @@ void test_vssseg8e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf8_m(
@@ -84,7 +84,7 @@ void test_vssseg8e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf4_m(
@@ -93,7 +93,7 @@ void test_vssseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf2_m(
@@ -102,7 +102,7 @@ void test_vssseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_i8m1_m(
@@ -111,7 +111,7 @@ void test_vssseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf8_m(
@@ -120,7 +120,7 @@ void test_vssseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf4_m(
@@ -129,7 +129,7 @@ void test_vssseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf2_m(
@@ -138,7 +138,7 @@ void test_vssseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vssseg8e8_v_u8m1_m(
@@ -147,6 +147,6 @@ void test_vssseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
// CHECK-RV64-NEXT: ret void
//
void test_vssseg8e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
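The masked variants renamed above take a leading vbool operand whose mask type matches the element width and LMUL (vbool8_t for e8 at LMUL=1). A minimal sketch of the masked form, with an illustrative helper name and the signature taken from the tests above:

#include <riscv_vector.h>

// Illustrative caller: only elements whose mask bit is set have their
// eight-byte segment stored; masked-off segments are left untouched.
void store_eight_u8_fields_masked(vbool8_t mask, uint8_t *base,
                                  ptrdiff_t bstride,
                                  vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2,
                                  vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5,
                                  vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
  __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}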
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c
index 151184e1d6ac..de6065a30071 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vssub(op1, op2, vl);
+ return __riscv_vssub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m(
@@ -408,7 +408,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m(
@@ -417,7 +417,7 @@ vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m(
@@ -426,7 +426,7 @@ vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m(
@@ -435,7 +435,7 @@ vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m(
@@ -444,7 +444,7 @@ vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m(
@@ -453,7 +453,7 @@ vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m(
@@ -462,7 +462,7 @@ vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m(
@@ -471,7 +471,7 @@ vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m(
@@ -480,7 +480,7 @@ vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m(
@@ -489,7 +489,7 @@ vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m(
@@ -498,7 +498,7 @@ vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m(
@@ -507,7 +507,7 @@ vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m(
@@ -516,7 +516,7 @@ vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m(
@@ -525,7 +525,7 @@ vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m(
@@ -534,7 +534,7 @@ vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m(
@@ -543,7 +543,7 @@ vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m(
@@ -552,7 +552,7 @@ vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m(
@@ -561,7 +561,7 @@ vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m(
@@ -570,7 +570,7 @@ vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m(
@@ -579,7 +579,7 @@ vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m(
@@ -588,7 +588,7 @@ vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m(
@@ -597,7 +597,7 @@ vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m(
@@ -606,7 +606,7 @@ vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m(
@@ -615,7 +615,7 @@ vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m(
@@ -624,7 +624,7 @@ vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m(
@@ -633,7 +633,7 @@ vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m(
@@ -642,7 +642,7 @@ vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m(
@@ -651,7 +651,7 @@ vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m(
@@ -660,7 +660,7 @@ vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m(
@@ -669,7 +669,7 @@ vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m(
@@ -678,7 +678,7 @@ vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m(
@@ -687,7 +687,7 @@ vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m(
@@ -696,7 +696,7 @@ vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m(
@@ -705,7 +705,7 @@ vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m(
@@ -714,7 +714,7 @@ vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m(
@@ -723,7 +723,7 @@ vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m(
@@ -732,7 +732,7 @@ vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m(
@@ -741,7 +741,7 @@ vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m(
@@ -750,7 +750,7 @@ vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m(
@@ -759,7 +759,7 @@ vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m(
@@ -768,7 +768,7 @@ vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m(
@@ -777,7 +777,7 @@ vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m(
@@ -786,7 +786,7 @@ vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m(
@@ -795,6 +795,6 @@ vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vssub(mask, op1, op2, vl);
+ return __riscv_vssub(mask, op1, op2, vl);
}
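For the saturating subtract intrinsics the rename works the same way: overload resolution on the operand types still selects the vector-vector or vector-scalar form under the single prefixed name. A minimal sketch, assuming <riscv_vector.h> and mirroring the i32m1 test signatures above (`sat_sub_vv`/`sat_sub_vx` are hypothetical wrappers, not part of this patch):

#include <riscv_vector.h>

// Sketch: signed saturating subtract via the renamed overloaded intrinsic.
// The same name resolves to the vv form for two vector operands and the
// vx form when the second operand is a scalar, as in the tests above.
vint32m1_t sat_sub_vv(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return __riscv_vssub(op1, op2, vl);  // vector-vector
}
vint32m1_t sat_sub_vx(vint32m1_t op1, int32_t op2, size_t vl) {
  return __riscv_vssub(op1, op2, vl);  // vector-scalar
}
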
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c
index c30bd9dfa30f..413f7eb0fe75 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
@@ -336,7 +336,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
@@ -345,7 +345,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
@@ -354,7 +354,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
@@ -363,7 +363,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
@@ -372,7 +372,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
@@ -381,7 +381,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
@@ -390,7 +390,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
@@ -399,7 +399,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vssubu(op1, op2, vl);
+ return __riscv_vssubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m(
@@ -408,7 +408,7 @@ vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m(
@@ -471,7 +471,7 @@ vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m(
@@ -480,7 +480,7 @@ vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m(
@@ -489,7 +489,7 @@ vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m(
@@ -498,7 +498,7 @@ vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m(
@@ -507,7 +507,7 @@ vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m(
@@ -516,7 +516,7 @@ vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m(
@@ -525,7 +525,7 @@ vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m(
@@ -534,7 +534,7 @@ vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m(
@@ -579,7 +579,7 @@ vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m(
@@ -588,7 +588,7 @@ vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m(
@@ -597,7 +597,7 @@ vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m(
@@ -606,7 +606,7 @@ vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m(
@@ -615,7 +615,7 @@ vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m(
@@ -624,7 +624,7 @@ vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m(
@@ -633,7 +633,7 @@ vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m(
@@ -642,7 +642,7 @@ vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m(
@@ -669,7 +669,7 @@ vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m(
@@ -678,7 +678,7 @@ vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m(
@@ -687,7 +687,7 @@ vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m(
@@ -696,7 +696,7 @@ vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m(
@@ -705,7 +705,7 @@ vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m(
@@ -714,7 +714,7 @@ vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m(
@@ -723,7 +723,7 @@ vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m(
@@ -732,7 +732,7 @@ vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m(
@@ -741,7 +741,7 @@ vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m(
@@ -750,7 +750,7 @@ vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m(
@@ -759,7 +759,7 @@ vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m(
@@ -768,7 +768,7 @@ vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m(
@@ -777,7 +777,7 @@ vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m(
@@ -786,7 +786,7 @@ vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m(
@@ -795,6 +795,6 @@ vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vssubu(mask, op1, op2, vl);
+ return __riscv_vssubu(mask, op1, op2, vl);
}
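
The hunks above and below are mechanical renames from the autogenerated tests; the user-visible effect is that every overloaded RVV intrinsic call gains the `__riscv_` prefix while overload resolution stays the same. As a minimal sketch of how calling code migrates — assuming a compiler carrying this patch-set, invoked with something like `-march=rv64gcv`; the strip-mining helpers `__riscv_vsetvl_e8m1`, `__riscv_vle8_v_u8m1`, and `__riscv_vse8_v_u8m1` are the prefixed forms from the same patch series, not part of this particular diff:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Saturating unsigned byte subtraction over two buffers. Before this
 * patch-set the overloaded calls were spelled vsetvl_e8m1 / vle8_v_u8m1 /
 * vssubu / vse8_v_u8m1; afterwards they carry the __riscv_ prefix. */
void saturating_sub_u8(const uint8_t *a, const uint8_t *b,
                       uint8_t *out, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);          /* elements this pass */
    vuint8m1_t va = __riscv_vle8_v_u8m1(a + i, vl);  /* load operand 1 */
    vuint8m1_t vb = __riscv_vle8_v_u8m1(b + i, vl);  /* load operand 2 */
    vuint8m1_t vr = __riscv_vssubu(va, vb, vl);      /* overloaded, prefixed */
    __riscv_vse8_v_u8m1(out + i, vr, vl);            /* store result */
    i += vl;
  }
}

The masked overloads in the vssubu.c hunks above follow the same pattern: the mask-first call `vssubu(mask, op1, op2, vl)` simply becomes `__riscv_vssubu(mask, op1, op2, vl)`; only the spelling changes.
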
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsub.c
index b2b5af8bd597..5b1018a6bed4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
@@ -408,7 +408,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
@@ -795,7 +795,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsub(op1, op2, vl);
+ return __riscv_vsub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m(
@@ -804,7 +804,7 @@ vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m(
@@ -813,7 +813,7 @@ vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m(
@@ -822,7 +822,7 @@ vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m(
@@ -831,7 +831,7 @@ vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m(
@@ -840,7 +840,7 @@ vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m(
@@ -849,7 +849,7 @@ vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m(
@@ -858,7 +858,7 @@ vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m(
@@ -867,7 +867,7 @@ vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m(
@@ -876,7 +876,7 @@ vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m(
@@ -885,7 +885,7 @@ vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m(
@@ -894,7 +894,7 @@ vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m(
@@ -903,7 +903,7 @@ vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m(
@@ -912,7 +912,7 @@ vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m(
@@ -921,7 +921,7 @@ vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m(
@@ -930,7 +930,7 @@ vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m(
@@ -939,7 +939,7 @@ vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m(
@@ -948,7 +948,7 @@ vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m(
@@ -957,7 +957,7 @@ vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m(
@@ -966,7 +966,7 @@ vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m(
@@ -975,7 +975,7 @@ vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m(
@@ -984,7 +984,7 @@ vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m(
@@ -993,7 +993,7 @@ vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsub(mask, op1, op2, vl);
+ return __riscv_vsub(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei16.c
index ac3915291b55..f5f24ce041da 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsuxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8(
@@ -58,7 +58,7 @@ void test_vsuxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2(
@@ -67,7 +67,7 @@ void test_vsuxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1(
@@ -76,7 +76,7 @@ void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2(
@@ -85,7 +85,7 @@ void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4(
@@ -94,7 +94,7 @@ void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8(
@@ -103,7 +103,7 @@ void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1(
@@ -112,7 +112,7 @@ void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2(
@@ -121,7 +121,7 @@ void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4(
@@ -130,7 +130,7 @@ void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8(
@@ -139,7 +139,7 @@ void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8(
@@ -148,7 +148,7 @@ void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4(
@@ -157,7 +157,7 @@ void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2(
@@ -166,7 +166,7 @@ void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1(
@@ -175,7 +175,7 @@ void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2(
@@ -184,7 +184,7 @@ void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4(
@@ -193,7 +193,7 @@ void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4(
@@ -202,7 +202,7 @@ void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2(
@@ -211,7 +211,7 @@ void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1(
@@ -220,7 +220,7 @@ void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2(
@@ -229,7 +229,7 @@ void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4(
@@ -238,7 +238,7 @@ void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8(
@@ -247,7 +247,7 @@ void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2(
@@ -256,7 +256,7 @@ void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1(
@@ -265,7 +265,7 @@ void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2(
@@ -274,7 +274,7 @@ void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4(
@@ -283,7 +283,7 @@ void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8(
@@ -292,7 +292,7 @@ void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1(
@@ -301,7 +301,7 @@ void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2(
@@ -310,7 +310,7 @@ void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4(
@@ -319,7 +319,7 @@ void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8(
@@ -328,7 +328,7 @@ void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8(
@@ -337,7 +337,7 @@ void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4(
@@ -346,7 +346,7 @@ void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2(
@@ -355,7 +355,7 @@ void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1(
@@ -364,7 +364,7 @@ void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2(
@@ -373,7 +373,7 @@ void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4(
@@ -382,7 +382,7 @@ void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4(
@@ -391,7 +391,7 @@ void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2(
@@ -400,7 +400,7 @@ void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1(
@@ -409,7 +409,7 @@ void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2(
@@ -418,7 +418,7 @@ void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4(
@@ -427,7 +427,7 @@ void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8(
@@ -436,7 +436,7 @@ void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2(
@@ -445,7 +445,7 @@ void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1(
@@ -454,7 +454,7 @@ void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2(
@@ -463,7 +463,7 @@ void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4(
@@ -472,7 +472,7 @@ void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8(
@@ -481,7 +481,7 @@ void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1(
@@ -490,7 +490,7 @@ void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2(
@@ -499,7 +499,7 @@ void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4(
@@ -508,7 +508,7 @@ void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8(
@@ -517,7 +517,7 @@ void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
- return vsuxei16(base, bindex, value, vl);
+ return __riscv_vsuxei16(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4_m(
@@ -526,7 +526,7 @@ void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2_m(
@@ -535,7 +535,7 @@ void test_vsuxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1_m(
@@ -544,7 +544,7 @@ void test_vsuxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2_m(
@@ -553,7 +553,7 @@ void test_vsuxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4_m(
@@ -562,7 +562,7 @@ void test_vsuxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8_m(
@@ -571,7 +571,7 @@ void test_vsuxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m(
@@ -580,7 +580,7 @@ void test_vsuxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m(
@@ -589,7 +589,7 @@ void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m(
@@ -598,7 +598,7 @@ void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m(
@@ -607,7 +607,7 @@ void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m(
@@ -616,7 +616,7 @@ void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1_m(
@@ -625,7 +625,7 @@ void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2_m(
@@ -634,7 +634,7 @@ void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4_m(
@@ -643,7 +643,7 @@ void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8_m(
@@ -652,7 +652,7 @@ void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8_m(
@@ -661,7 +661,7 @@ void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4_m(
@@ -670,7 +670,7 @@ void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2_m(
@@ -679,7 +679,7 @@ void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1_m(
@@ -688,7 +688,7 @@ void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2_m(
@@ -697,7 +697,7 @@ void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4_m(
@@ -706,7 +706,7 @@ void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4_m(
@@ -715,7 +715,7 @@ void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2_m(
@@ -724,7 +724,7 @@ void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1_m(
@@ -733,7 +733,7 @@ void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2_m(
@@ -742,7 +742,7 @@ void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4_m(
@@ -751,7 +751,7 @@ void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8_m(
@@ -760,7 +760,7 @@ void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2_m(
@@ -769,7 +769,7 @@ void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1_m(
@@ -778,7 +778,7 @@ void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2_m(
@@ -787,7 +787,7 @@ void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4_m(
@@ -796,7 +796,7 @@ void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8_m(
@@ -805,7 +805,7 @@ void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1_m(
@@ -814,7 +814,7 @@ void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2_m(
@@ -823,7 +823,7 @@ void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4_m(
@@ -832,7 +832,7 @@ void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8_m(
@@ -841,7 +841,7 @@ void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8_m(
@@ -850,7 +850,7 @@ void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4_m(
@@ -859,7 +859,7 @@ void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2_m(
@@ -868,7 +868,7 @@ void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1_m(
@@ -877,7 +877,7 @@ void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2_m(
@@ -886,7 +886,7 @@ void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4_m(
@@ -895,7 +895,7 @@ void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4_m(
@@ -904,7 +904,7 @@ void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2_m(
@@ -913,7 +913,7 @@ void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1_m(
@@ -922,7 +922,7 @@ void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2_m(
@@ -931,7 +931,7 @@ void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4_m(
@@ -940,7 +940,7 @@ void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8_m(
@@ -949,7 +949,7 @@ void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2_m(
@@ -958,7 +958,7 @@ void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1_m(
@@ -967,7 +967,7 @@ void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2_m(
@@ -976,7 +976,7 @@ void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4_m(
@@ -985,7 +985,7 @@ void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8_m(
@@ -994,7 +994,7 @@ void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1_m(
@@ -1003,7 +1003,7 @@ void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2_m(
@@ -1012,7 +1012,7 @@ void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4_m(
@@ -1021,7 +1021,7 @@ void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8_m(
@@ -1030,6 +1030,6 @@ void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
- return vsuxei16(mask, base, bindex, value, vl);
+ return __riscv_vsuxei16(mask, base, bindex, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei32.c
index 70028e629cb9..e87b8eaca915 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsuxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2(
@@ -58,7 +58,7 @@ void test_vsuxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1(
@@ -67,7 +67,7 @@ void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2(
@@ -76,7 +76,7 @@ void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4(
@@ -85,7 +85,7 @@ void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8(
@@ -94,7 +94,7 @@ void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1(
@@ -103,7 +103,7 @@ void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2(
@@ -112,7 +112,7 @@ void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4(
@@ -121,7 +121,7 @@ void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8(
@@ -130,7 +130,7 @@ void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8(
@@ -139,7 +139,7 @@ void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4(
@@ -148,7 +148,7 @@ void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2(
@@ -157,7 +157,7 @@ void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1(
@@ -166,7 +166,7 @@ void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2(
@@ -175,7 +175,7 @@ void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4(
@@ -184,7 +184,7 @@ void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2(
@@ -193,7 +193,7 @@ void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1(
@@ -202,7 +202,7 @@ void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2(
@@ -211,7 +211,7 @@ void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4(
@@ -220,7 +220,7 @@ void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2(
@@ -229,7 +229,7 @@ void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1(
@@ -238,7 +238,7 @@ void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2(
@@ -247,7 +247,7 @@ void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4(
@@ -256,7 +256,7 @@ void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8(
@@ -265,7 +265,7 @@ void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1(
@@ -274,7 +274,7 @@ void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2(
@@ -283,7 +283,7 @@ void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4(
@@ -292,7 +292,7 @@ void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8(
@@ -301,7 +301,7 @@ void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8(
@@ -310,7 +310,7 @@ void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4(
@@ -319,7 +319,7 @@ void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2(
@@ -328,7 +328,7 @@ void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1(
@@ -337,7 +337,7 @@ void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2(
@@ -346,7 +346,7 @@ void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4(
@@ -355,7 +355,7 @@ void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2(
@@ -364,7 +364,7 @@ void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1(
@@ -373,7 +373,7 @@ void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2(
@@ -382,7 +382,7 @@ void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4(
@@ -391,7 +391,7 @@ void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2(
@@ -400,7 +400,7 @@ void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1(
@@ -409,7 +409,7 @@ void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2(
@@ -418,7 +418,7 @@ void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4(
@@ -427,7 +427,7 @@ void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8(
@@ -436,7 +436,7 @@ void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1(
@@ -445,7 +445,7 @@ void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2(
@@ -454,7 +454,7 @@ void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4(
@@ -463,7 +463,7 @@ void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8(
@@ -472,7 +472,7 @@ void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
- return vsuxei32(base, bindex, value, vl);
+ return __riscv_vsuxei32(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4_m(
@@ -481,7 +481,7 @@ void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2_m(
@@ -490,7 +490,7 @@ void test_vsuxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1_m(
@@ -499,7 +499,7 @@ void test_vsuxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2_m(
@@ -508,7 +508,7 @@ void test_vsuxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4_m(
@@ -517,7 +517,7 @@ void test_vsuxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2_m(
@@ -526,7 +526,7 @@ void test_vsuxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1_m(
@@ -535,7 +535,7 @@ void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2_m(
@@ -544,7 +544,7 @@ void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4_m(
@@ -553,7 +553,7 @@ void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8_m(
@@ -562,7 +562,7 @@ void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1_m(
@@ -571,7 +571,7 @@ void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2_m(
@@ -580,7 +580,7 @@ void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4_m(
@@ -589,7 +589,7 @@ void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8_m(
@@ -598,7 +598,7 @@ void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8_m(
@@ -607,7 +607,7 @@ void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4_m(
@@ -616,7 +616,7 @@ void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2_m(
@@ -625,7 +625,7 @@ void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1_m(
@@ -634,7 +634,7 @@ void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2_m(
@@ -643,7 +643,7 @@ void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4_m(
@@ -652,7 +652,7 @@ void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2_m(
@@ -661,7 +661,7 @@ void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1_m(
@@ -670,7 +670,7 @@ void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2_m(
@@ -679,7 +679,7 @@ void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4_m(
@@ -688,7 +688,7 @@ void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2_m(
@@ -697,7 +697,7 @@ void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1_m(
@@ -706,7 +706,7 @@ void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2_m(
@@ -715,7 +715,7 @@ void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4_m(
@@ -724,7 +724,7 @@ void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8_m(
@@ -733,7 +733,7 @@ void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1_m(
@@ -742,7 +742,7 @@ void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2_m(
@@ -751,7 +751,7 @@ void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4_m(
@@ -760,7 +760,7 @@ void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8_m(
@@ -769,7 +769,7 @@ void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8_m(
@@ -778,7 +778,7 @@ void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4_m(
@@ -787,7 +787,7 @@ void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2_m(
@@ -796,7 +796,7 @@ void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1_m(
@@ -805,7 +805,7 @@ void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2_m(
@@ -814,7 +814,7 @@ void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4_m(
@@ -823,7 +823,7 @@ void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2_m(
@@ -832,7 +832,7 @@ void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1_m(
@@ -841,7 +841,7 @@ void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2_m(
@@ -850,7 +850,7 @@ void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4_m(
@@ -859,7 +859,7 @@ void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2_m(
@@ -868,7 +868,7 @@ void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1_m(
@@ -877,7 +877,7 @@ void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2_m(
@@ -886,7 +886,7 @@ void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4_m(
@@ -895,7 +895,7 @@ void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8_m(
@@ -904,7 +904,7 @@ void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1_m(
@@ -913,7 +913,7 @@ void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2_m(
@@ -922,7 +922,7 @@ void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4_m(
@@ -931,7 +931,7 @@ void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8_m(
@@ -940,6 +940,6 @@ void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
- return vsuxei32(mask, base, bindex, value, vl);
+ return __riscv_vsuxei32(mask, base, bindex, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei64.c
index 6b6525c2a820..751383e54353 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t va
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4(
@@ -76,7 +76,7 @@ void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1(
@@ -85,7 +85,7 @@ void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2(
@@ -94,7 +94,7 @@ void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4(
@@ -103,7 +103,7 @@ void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8(
@@ -112,7 +112,7 @@ void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8(
@@ -121,7 +121,7 @@ void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4(
@@ -130,7 +130,7 @@ void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2(
@@ -139,7 +139,7 @@ void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1(
@@ -148,7 +148,7 @@ void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4(
@@ -157,7 +157,7 @@ void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2(
@@ -166,7 +166,7 @@ void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1(
@@ -175,7 +175,7 @@ void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2(
@@ -184,7 +184,7 @@ void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2(
@@ -193,7 +193,7 @@ void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1(
@@ -202,7 +202,7 @@ void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2(
@@ -211,7 +211,7 @@ void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4(
@@ -220,7 +220,7 @@ void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1(
@@ -229,7 +229,7 @@ void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2(
@@ -238,7 +238,7 @@ void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4(
@@ -247,7 +247,7 @@ void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8(
@@ -256,7 +256,7 @@ void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8(
@@ -265,7 +265,7 @@ void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4(
@@ -274,7 +274,7 @@ void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2(
@@ -283,7 +283,7 @@ void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1(
@@ -292,7 +292,7 @@ void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4(
@@ -301,7 +301,7 @@ void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2(
@@ -310,7 +310,7 @@ void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1(
@@ -319,7 +319,7 @@ void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2(
@@ -328,7 +328,7 @@ void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2(
@@ -337,7 +337,7 @@ void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1(
@@ -346,7 +346,7 @@ void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2(
@@ -355,7 +355,7 @@ void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4(
@@ -364,7 +364,7 @@ void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1(
@@ -373,7 +373,7 @@ void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2(
@@ -382,7 +382,7 @@ void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4(
@@ -391,7 +391,7 @@ void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8(
@@ -400,7 +400,7 @@ void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
- return vsuxei64(base, bindex, value, vl);
+ return __riscv_vsuxei64(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4_m(
@@ -409,7 +409,7 @@ void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2_m(
@@ -418,7 +418,7 @@ void test_vsuxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1_m(
@@ -427,7 +427,7 @@ void test_vsuxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2_m(
@@ -436,7 +436,7 @@ void test_vsuxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2_m(
@@ -445,7 +445,7 @@ void test_vsuxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1_m(
@@ -454,7 +454,7 @@ void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2_m(
@@ -463,7 +463,7 @@ void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4_m(
@@ -472,7 +472,7 @@ void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1_m(
@@ -481,7 +481,7 @@ void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2_m(
@@ -490,7 +490,7 @@ void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4_m(
@@ -499,7 +499,7 @@ void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8_m(
@@ -508,7 +508,7 @@ void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8_m(
@@ -517,7 +517,7 @@ void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4_m(
@@ -526,7 +526,7 @@ void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2_m(
@@ -535,7 +535,7 @@ void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1_m(
@@ -544,7 +544,7 @@ void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4_m(
@@ -553,7 +553,7 @@ void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2_m(
@@ -562,7 +562,7 @@ void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1_m(
@@ -571,7 +571,7 @@ void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2_m(
@@ -580,7 +580,7 @@ void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2_m(
@@ -589,7 +589,7 @@ void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1_m(
@@ -598,7 +598,7 @@ void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2_m(
@@ -607,7 +607,7 @@ void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4_m(
@@ -616,7 +616,7 @@ void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1_m(
@@ -625,7 +625,7 @@ void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2_m(
@@ -634,7 +634,7 @@ void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4_m(
@@ -643,7 +643,7 @@ void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8_m(
@@ -652,7 +652,7 @@ void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8_m(
@@ -661,7 +661,7 @@ void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4_m(
@@ -670,7 +670,7 @@ void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2_m(
@@ -679,7 +679,7 @@ void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1_m(
@@ -688,7 +688,7 @@ void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4_m(
@@ -697,7 +697,7 @@ void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2_m(
@@ -706,7 +706,7 @@ void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1_m(
@@ -715,7 +715,7 @@ void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2_m(
@@ -724,7 +724,7 @@ void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2_m(
@@ -733,7 +733,7 @@ void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1_m(
@@ -742,7 +742,7 @@ void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2_m(
@@ -751,7 +751,7 @@ void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4_m(
@@ -760,7 +760,7 @@ void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1_m(
@@ -769,7 +769,7 @@ void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2_m(
@@ -778,7 +778,7 @@ void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4_m(
@@ -787,7 +787,7 @@ void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8_m(
@@ -796,6 +796,6 @@ void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
- return vsuxei64(mask, base, bindex, value, vl);
+ return __riscv_vsuxei64(mask, base, bindex, value, vl);
}
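A usage sketch, not part of the patch: with this rename, callers spell the overloaded intrinsic __riscv_vsuxei64 instead of vsuxei64. The wrapper names below are hypothetical; the argument types mirror the autogenerated f32m4 tests above, and the index vector holds byte offsets, as with all RVV indexed stores.

#include <riscv_vector.h>

/* Hypothetical helper: unordered indexed store of vl floats to
   base + bindex[i] (byte offsets), via the prefixed overload. */
void scatter_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
  __riscv_vsuxei64(base, bindex, value, vl);       /* was: vsuxei64(base, bindex, value, vl) */
}

/* Masked form: lanes whose mask bit is clear are not stored. */
void scatter_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
  __riscv_vsuxei64(mask, base, bindex, value, vl); /* was: vsuxei64(mask, ...) */
}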
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei8.c
index ae8342a55ffb..c568cbf43b18 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t val
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsuxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8(
@@ -58,7 +58,7 @@ void test_vsuxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2(
@@ -67,7 +67,7 @@ void test_vsuxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1(
@@ -76,7 +76,7 @@ void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2(
@@ -85,7 +85,7 @@ void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4(
@@ -94,7 +94,7 @@ void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8(
@@ -103,7 +103,7 @@ void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1(
@@ -112,7 +112,7 @@ void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2(
@@ -121,7 +121,7 @@ void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4(
@@ -130,7 +130,7 @@ void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8(
@@ -139,7 +139,7 @@ void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8(
@@ -148,7 +148,7 @@ void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4(
@@ -157,7 +157,7 @@ void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2(
@@ -166,7 +166,7 @@ void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1(
@@ -175,7 +175,7 @@ void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2(
@@ -184,7 +184,7 @@ void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4(
@@ -193,7 +193,7 @@ void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8(
@@ -202,7 +202,7 @@ void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4(
@@ -211,7 +211,7 @@ void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2(
@@ -220,7 +220,7 @@ void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1(
@@ -229,7 +229,7 @@ void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2(
@@ -238,7 +238,7 @@ void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4(
@@ -247,7 +247,7 @@ void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8(
@@ -256,7 +256,7 @@ void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2(
@@ -265,7 +265,7 @@ void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1(
@@ -274,7 +274,7 @@ void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2(
@@ -283,7 +283,7 @@ void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4(
@@ -292,7 +292,7 @@ void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8(
@@ -301,7 +301,7 @@ void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1(
@@ -310,7 +310,7 @@ void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2(
@@ -319,7 +319,7 @@ void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4(
@@ -328,7 +328,7 @@ void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8(
@@ -337,7 +337,7 @@ void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, s
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8(
@@ -346,7 +346,7 @@ void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, si
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4(
@@ -355,7 +355,7 @@ void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2(
@@ -364,7 +364,7 @@ void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1(
@@ -373,7 +373,7 @@ void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2(
@@ -382,7 +382,7 @@ void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4(
@@ -391,7 +391,7 @@ void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8(
@@ -400,7 +400,7 @@ void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4(
@@ -409,7 +409,7 @@ void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, siz
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2(
@@ -418,7 +418,7 @@ void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1(
@@ -427,7 +427,7 @@ void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2(
@@ -436,7 +436,7 @@ void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4(
@@ -445,7 +445,7 @@ void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8(
@@ -454,7 +454,7 @@ void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2(
@@ -463,7 +463,7 @@ void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1(
@@ -472,7 +472,7 @@ void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t valu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2(
@@ -481,7 +481,7 @@ void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4(
@@ -490,7 +490,7 @@ void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8(
@@ -499,7 +499,7 @@ void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1(
@@ -508,7 +508,7 @@ void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2(
@@ -517,7 +517,7 @@ void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4(
@@ -526,7 +526,7 @@ void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8(
@@ -535,7 +535,7 @@ void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
- return vsuxei8(base, bindex, value, vl);
+ return __riscv_vsuxei8(base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4_m(
@@ -544,7 +544,7 @@ void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2_m(
@@ -553,7 +553,7 @@ void test_vsuxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1_m(
@@ -562,7 +562,7 @@ void test_vsuxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2_m(
@@ -571,7 +571,7 @@ void test_vsuxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4_m(
@@ -580,7 +580,7 @@ void test_vsuxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8_m(
@@ -589,7 +589,7 @@ void test_vsuxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m(
@@ -598,7 +598,7 @@ void test_vsuxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m(
@@ -607,7 +607,7 @@ void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m(
@@ -616,7 +616,7 @@ void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m(
@@ -625,7 +625,7 @@ void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfl
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m(
@@ -634,7 +634,7 @@ void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1_m(
@@ -643,7 +643,7 @@ void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2_m(
@@ -652,7 +652,7 @@ void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4_m(
@@ -661,7 +661,7 @@ void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8_m(
@@ -670,7 +670,7 @@ void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vf
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8_m(
@@ -679,7 +679,7 @@ void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vflo
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4_m(
@@ -688,7 +688,7 @@ void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2_m(
@@ -697,7 +697,7 @@ void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1_m(
@@ -706,7 +706,7 @@ void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2_m(
@@ -715,7 +715,7 @@ void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4_m(
@@ -724,7 +724,7 @@ void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8_m(
@@ -733,7 +733,7 @@ void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4_m(
@@ -742,7 +742,7 @@ void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2_m(
@@ -751,7 +751,7 @@ void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1_m(
@@ -760,7 +760,7 @@ void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2_m(
@@ -769,7 +769,7 @@ void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4_m(
@@ -778,7 +778,7 @@ void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8_m(
@@ -787,7 +787,7 @@ void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2_m(
@@ -796,7 +796,7 @@ void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1_m(
@@ -805,7 +805,7 @@ void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2_m(
@@ -814,7 +814,7 @@ void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4_m(
@@ -823,7 +823,7 @@ void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8_m(
@@ -832,7 +832,7 @@ void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1_m(
@@ -841,7 +841,7 @@ void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2_m(
@@ -850,7 +850,7 @@ void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4_m(
@@ -859,7 +859,7 @@ void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8_m(
@@ -868,7 +868,7 @@ void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8_m(
@@ -877,7 +877,7 @@ void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4_m(
@@ -886,7 +886,7 @@ void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2_m(
@@ -895,7 +895,7 @@ void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1_m(
@@ -904,7 +904,7 @@ void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2_m(
@@ -913,7 +913,7 @@ void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4_m(
@@ -922,7 +922,7 @@ void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8_m(
@@ -931,7 +931,7 @@ void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4_m(
@@ -940,7 +940,7 @@ void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2_m(
@@ -949,7 +949,7 @@ void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1_m(
@@ -958,7 +958,7 @@ void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2_m(
@@ -967,7 +967,7 @@ void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4_m(
@@ -976,7 +976,7 @@ void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8_m(
@@ -985,7 +985,7 @@ void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2_m(
@@ -994,7 +994,7 @@ void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1_m(
@@ -1003,7 +1003,7 @@ void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2_m(
@@ -1012,7 +1012,7 @@ void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4_m(
@@ -1021,7 +1021,7 @@ void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8_m(
@@ -1030,7 +1030,7 @@ void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1_m(
@@ -1039,7 +1039,7 @@ void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2_m(
@@ -1048,7 +1048,7 @@ void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4_m(
@@ -1057,7 +1057,7 @@ void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8_m(
@@ -1066,6 +1066,6 @@ void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
- return vsuxei8(mask, base, bindex, value, vl);
+ return __riscv_vsuxei8(mask, base, bindex, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c
index 4e6733a24820..661ea49acdae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsuxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2(
@@ -58,7 +58,7 @@ void test_vsuxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1(
@@ -67,7 +67,7 @@ void test_vsuxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2(
@@ -76,7 +76,7 @@ void test_vsuxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4(
@@ -85,7 +85,7 @@ void test_vsuxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1(
@@ -94,7 +94,7 @@ void test_vsuxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2(
@@ -103,7 +103,7 @@ void test_vsuxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4(
@@ -112,7 +112,7 @@ void test_vsuxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8(
@@ -121,7 +121,7 @@ void test_vsuxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4(
@@ -130,7 +130,7 @@ void test_vsuxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2(
@@ -139,7 +139,7 @@ void test_vsuxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1(
@@ -148,7 +148,7 @@ void test_vsuxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2(
@@ -157,7 +157,7 @@ void test_vsuxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4(
@@ -166,7 +166,7 @@ void test_vsuxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4(
@@ -175,7 +175,7 @@ void test_vsuxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2(
@@ -184,7 +184,7 @@ void test_vsuxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1(
@@ -193,7 +193,7 @@ void test_vsuxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2(
@@ -202,7 +202,7 @@ void test_vsuxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4(
@@ -211,7 +211,7 @@ void test_vsuxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2(
@@ -238,7 +238,7 @@ void test_vsuxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4(
@@ -247,7 +247,7 @@ void test_vsuxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1(
@@ -256,7 +256,7 @@ void test_vsuxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2(
@@ -265,7 +265,7 @@ void test_vsuxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4(
@@ -274,7 +274,7 @@ void test_vsuxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8(
@@ -283,7 +283,7 @@ void test_vsuxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4(
@@ -292,7 +292,7 @@ void test_vsuxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2(
@@ -301,7 +301,7 @@ void test_vsuxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1(
@@ -310,7 +310,7 @@ void test_vsuxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2(
@@ -319,7 +319,7 @@ void test_vsuxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4(
@@ -328,7 +328,7 @@ void test_vsuxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4(
@@ -337,7 +337,7 @@ void test_vsuxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2(
@@ -346,7 +346,7 @@ void test_vsuxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1(
@@ -355,7 +355,7 @@ void test_vsuxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2(
@@ -364,7 +364,7 @@ void test_vsuxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4(
@@ -373,7 +373,7 @@ void test_vsuxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2(
@@ -382,7 +382,7 @@ void test_vsuxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1(
@@ -391,7 +391,7 @@ void test_vsuxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2(
@@ -400,7 +400,7 @@ void test_vsuxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4(
@@ -409,7 +409,7 @@ void test_vsuxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1(
@@ -418,7 +418,7 @@ void test_vsuxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2(
@@ -427,7 +427,7 @@ void test_vsuxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4(
@@ -436,7 +436,7 @@ void test_vsuxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4_m(
@@ -445,7 +445,7 @@ void test_vsuxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2_m(
@@ -472,7 +472,7 @@ void test_vsuxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4_m(
@@ -481,7 +481,7 @@ void test_vsuxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2_m(
@@ -490,7 +490,7 @@ void test_vsuxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1_m(
@@ -499,7 +499,7 @@ void test_vsuxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2_m(
@@ -508,7 +508,7 @@ void test_vsuxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4_m(
@@ -517,7 +517,7 @@ void test_vsuxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1_m(
@@ -526,7 +526,7 @@ void test_vsuxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2_m(
@@ -535,7 +535,7 @@ void test_vsuxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4_m(
@@ -544,7 +544,7 @@ void test_vsuxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m(
@@ -553,7 +553,7 @@ void test_vsuxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m(
@@ -598,7 +598,7 @@ void test_vsuxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m(
@@ -607,7 +607,7 @@ void test_vsuxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m(
@@ -616,7 +616,7 @@ void test_vsuxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m(
@@ -625,7 +625,7 @@ void test_vsuxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m(
@@ -634,7 +634,7 @@ void test_vsuxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m(
@@ -643,7 +643,7 @@ void test_vsuxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m(
@@ -652,7 +652,7 @@ void test_vsuxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m(
@@ -661,7 +661,7 @@ void test_vsuxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m(
@@ -670,7 +670,7 @@ void test_vsuxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m(
@@ -679,7 +679,7 @@ void test_vsuxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m(
@@ -688,7 +688,7 @@ void test_vsuxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m(
@@ -697,7 +697,7 @@ void test_vsuxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m(
@@ -706,7 +706,7 @@ void test_vsuxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m(
@@ -715,7 +715,7 @@ void test_vsuxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m(
@@ -724,7 +724,7 @@ void test_vsuxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m(
@@ -733,7 +733,7 @@ void test_vsuxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1_m(
@@ -742,7 +742,7 @@ void test_vsuxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m(
@@ -751,7 +751,7 @@ void test_vsuxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m(
@@ -760,7 +760,7 @@ void test_vsuxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m(
@@ -769,7 +769,7 @@ void test_vsuxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m(
@@ -778,7 +778,7 @@ void test_vsuxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m(
@@ -787,7 +787,7 @@ void test_vsuxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m(
@@ -796,7 +796,7 @@ void test_vsuxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m(
@@ -805,7 +805,7 @@ void test_vsuxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m(
@@ -814,7 +814,7 @@ void test_vsuxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m(
@@ -823,7 +823,7 @@ void test_vsuxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m(
@@ -832,7 +832,7 @@ void test_vsuxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m(
@@ -841,7 +841,7 @@ void test_vsuxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m(
@@ -850,7 +850,7 @@ void test_vsuxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m(
@@ -859,7 +859,7 @@ void test_vsuxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m(
@@ -868,6 +868,6 @@ void test_vsuxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c
index 3a3ff45a0087..7fc61d373b2d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsuxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2(
@@ -58,7 +58,7 @@ void test_vsuxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1(
@@ -67,7 +67,7 @@ void test_vsuxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2(
@@ -76,7 +76,7 @@ void test_vsuxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4(
@@ -85,7 +85,7 @@ void test_vsuxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1(
@@ -94,7 +94,7 @@ void test_vsuxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2(
@@ -103,7 +103,7 @@ void test_vsuxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4(
@@ -112,7 +112,7 @@ void test_vsuxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8(
@@ -121,7 +121,7 @@ void test_vsuxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4(
@@ -130,7 +130,7 @@ void test_vsuxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2(
@@ -139,7 +139,7 @@ void test_vsuxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1(
@@ -148,7 +148,7 @@ void test_vsuxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2(
@@ -157,7 +157,7 @@ void test_vsuxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1(
@@ -184,7 +184,7 @@ void test_vsuxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2(
@@ -193,7 +193,7 @@ void test_vsuxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4(
@@ -202,7 +202,7 @@ void test_vsuxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2(
@@ -211,7 +211,7 @@ void test_vsuxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1(
@@ -220,7 +220,7 @@ void test_vsuxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2(
@@ -229,7 +229,7 @@ void test_vsuxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4(
@@ -238,7 +238,7 @@ void test_vsuxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1(
@@ -247,7 +247,7 @@ void test_vsuxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2(
@@ -256,7 +256,7 @@ void test_vsuxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4(
@@ -265,7 +265,7 @@ void test_vsuxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8(
@@ -274,7 +274,7 @@ void test_vsuxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4(
@@ -283,7 +283,7 @@ void test_vsuxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2(
@@ -292,7 +292,7 @@ void test_vsuxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1(
@@ -301,7 +301,7 @@ void test_vsuxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2(
@@ -310,7 +310,7 @@ void test_vsuxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4(
@@ -319,7 +319,7 @@ void test_vsuxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2(
@@ -328,7 +328,7 @@ void test_vsuxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1(
@@ -337,7 +337,7 @@ void test_vsuxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2(
@@ -346,7 +346,7 @@ void test_vsuxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4(
@@ -355,7 +355,7 @@ void test_vsuxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2(
@@ -364,7 +364,7 @@ void test_vsuxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1(
@@ -373,7 +373,7 @@ void test_vsuxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2(
@@ -382,7 +382,7 @@ void test_vsuxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4(
@@ -391,7 +391,7 @@ void test_vsuxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1(
@@ -400,7 +400,7 @@ void test_vsuxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2(
@@ -409,7 +409,7 @@ void test_vsuxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4(
@@ -418,7 +418,7 @@ void test_vsuxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4_m(
@@ -463,7 +463,7 @@ void test_vsuxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2_m(
@@ -472,7 +472,7 @@ void test_vsuxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1_m(
@@ -481,7 +481,7 @@ void test_vsuxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2_m(
@@ -490,7 +490,7 @@ void test_vsuxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4_m(
@@ -499,7 +499,7 @@ void test_vsuxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1_m(
@@ -508,7 +508,7 @@ void test_vsuxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2_m(
@@ -517,7 +517,7 @@ void test_vsuxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4_m(
@@ -526,7 +526,7 @@ void test_vsuxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m(
@@ -535,7 +535,7 @@ void test_vsuxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m(
@@ -544,7 +544,7 @@ void test_vsuxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m(
@@ -553,7 +553,7 @@ void test_vsuxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m(
@@ -562,7 +562,7 @@ void test_vsuxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m(
@@ -580,7 +580,7 @@ void test_vsuxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m(
@@ -598,7 +598,7 @@ void test_vsuxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2_m(
@@ -607,7 +607,7 @@ void test_vsuxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m(
@@ -616,7 +616,7 @@ void test_vsuxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m(
@@ -625,7 +625,7 @@ void test_vsuxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m(
@@ -634,7 +634,7 @@ void test_vsuxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m(
@@ -643,7 +643,7 @@ void test_vsuxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m(
@@ -652,7 +652,7 @@ void test_vsuxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m(
@@ -661,7 +661,7 @@ void test_vsuxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m(
@@ -670,7 +670,7 @@ void test_vsuxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4_m(
@@ -679,7 +679,7 @@ void test_vsuxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8_m(
@@ -688,7 +688,7 @@ void test_vsuxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m(
@@ -697,7 +697,7 @@ void test_vsuxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m(
@@ -706,7 +706,7 @@ void test_vsuxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m(
@@ -715,7 +715,7 @@ void test_vsuxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m(
@@ -724,7 +724,7 @@ void test_vsuxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m(
@@ -733,7 +733,7 @@ void test_vsuxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m(
@@ -742,7 +742,7 @@ void test_vsuxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m(
@@ -751,7 +751,7 @@ void test_vsuxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m(
@@ -760,7 +760,7 @@ void test_vsuxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m(
@@ -769,7 +769,7 @@ void test_vsuxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m(
@@ -778,7 +778,7 @@ void test_vsuxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m(
@@ -787,7 +787,7 @@ void test_vsuxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m(
@@ -796,7 +796,7 @@ void test_vsuxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4_m(
@@ -805,7 +805,7 @@ void test_vsuxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m(
@@ -814,7 +814,7 @@ void test_vsuxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m(
@@ -823,7 +823,7 @@ void test_vsuxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m(
@@ -832,6 +832,6 @@ void test_vsuxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c
index 97f490663c6a..1ca98e9eb10c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4(
@@ -76,7 +76,7 @@ void test_vsuxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1(
@@ -85,7 +85,7 @@ void test_vsuxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2(
@@ -94,7 +94,7 @@ void test_vsuxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4(
@@ -103,7 +103,7 @@ void test_vsuxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8(
@@ -112,7 +112,7 @@ void test_vsuxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4(
@@ -121,7 +121,7 @@ void test_vsuxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1(
@@ -139,7 +139,7 @@ void test_vsuxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4(
@@ -148,7 +148,7 @@ void test_vsuxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2(
@@ -157,7 +157,7 @@ void test_vsuxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1(
@@ -166,7 +166,7 @@ void test_vsuxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2(
@@ -175,7 +175,7 @@ void test_vsuxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2(
@@ -184,7 +184,7 @@ void test_vsuxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1(
@@ -193,7 +193,7 @@ void test_vsuxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2(
@@ -202,7 +202,7 @@ void test_vsuxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4(
@@ -211,7 +211,7 @@ void test_vsuxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1(
@@ -220,7 +220,7 @@ void test_vsuxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2(
@@ -229,7 +229,7 @@ void test_vsuxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4(
@@ -238,7 +238,7 @@ void test_vsuxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8(
@@ -247,7 +247,7 @@ void test_vsuxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4(
@@ -256,7 +256,7 @@ void test_vsuxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2(
@@ -265,7 +265,7 @@ void test_vsuxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1(
@@ -274,7 +274,7 @@ void test_vsuxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4(
@@ -283,7 +283,7 @@ void test_vsuxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2(
@@ -292,7 +292,7 @@ void test_vsuxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1(
@@ -301,7 +301,7 @@ void test_vsuxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2(
@@ -310,7 +310,7 @@ void test_vsuxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2(
@@ -319,7 +319,7 @@ void test_vsuxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1(
@@ -328,7 +328,7 @@ void test_vsuxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2(
@@ -337,7 +337,7 @@ void test_vsuxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4(
@@ -346,7 +346,7 @@ void test_vsuxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1(
@@ -355,7 +355,7 @@ void test_vsuxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2(
@@ -364,7 +364,7 @@ void test_vsuxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4(
@@ -373,7 +373,7 @@ void test_vsuxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4_m(
@@ -382,7 +382,7 @@ void test_vsuxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2_m(
@@ -391,7 +391,7 @@ void test_vsuxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1_m(
@@ -400,7 +400,7 @@ void test_vsuxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2_m(
@@ -418,7 +418,7 @@ void test_vsuxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1_m(
@@ -427,7 +427,7 @@ void test_vsuxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4_m(
@@ -445,7 +445,7 @@ void test_vsuxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1_m(
@@ -454,7 +454,7 @@ void test_vsuxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2_m(
@@ -463,7 +463,7 @@ void test_vsuxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4_m(
@@ -472,7 +472,7 @@ void test_vsuxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m(
@@ -481,7 +481,7 @@ void test_vsuxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m(
@@ -490,7 +490,7 @@ void test_vsuxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m(
@@ -499,7 +499,7 @@ void test_vsuxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m(
@@ -508,7 +508,7 @@ void test_vsuxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4_m(
@@ -517,7 +517,7 @@ void test_vsuxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m(
@@ -526,7 +526,7 @@ void test_vsuxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m(
@@ -535,7 +535,7 @@ void test_vsuxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m(
@@ -544,7 +544,7 @@ void test_vsuxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2_m(
@@ -553,7 +553,7 @@ void test_vsuxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m(
@@ -562,7 +562,7 @@ void test_vsuxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m(
@@ -580,7 +580,7 @@ void test_vsuxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m(
@@ -589,7 +589,7 @@ void test_vsuxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m(
@@ -598,7 +598,7 @@ void test_vsuxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m(
@@ -607,7 +607,7 @@ void test_vsuxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m(
@@ -616,7 +616,7 @@ void test_vsuxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m(
@@ -625,7 +625,7 @@ void test_vsuxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m(
@@ -634,7 +634,7 @@ void test_vsuxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m(
@@ -643,7 +643,7 @@ void test_vsuxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m(
@@ -652,7 +652,7 @@ void test_vsuxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m(
@@ -661,7 +661,7 @@ void test_vsuxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1_m(
@@ -670,7 +670,7 @@ void test_vsuxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2_m(
@@ -679,7 +679,7 @@ void test_vsuxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2_m(
@@ -688,7 +688,7 @@ void test_vsuxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m(
@@ -697,7 +697,7 @@ void test_vsuxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m(
@@ -706,7 +706,7 @@ void test_vsuxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m(
@@ -715,7 +715,7 @@ void test_vsuxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1_m(
@@ -724,7 +724,7 @@ void test_vsuxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m(
@@ -733,7 +733,7 @@ void test_vsuxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m(
@@ -742,6 +742,6 @@ void test_vsuxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c
index f9bc664a3b58..a8aa07c65a9b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4(
@@ -49,7 +49,7 @@ void test_vsuxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2(
@@ -58,7 +58,7 @@ void test_vsuxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1(
@@ -67,7 +67,7 @@ void test_vsuxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2(
@@ -76,7 +76,7 @@ void test_vsuxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4(
@@ -85,7 +85,7 @@ void test_vsuxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1(
@@ -94,7 +94,7 @@ void test_vsuxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2(
@@ -103,7 +103,7 @@ void test_vsuxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4(
@@ -112,7 +112,7 @@ void test_vsuxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8(
@@ -121,7 +121,7 @@ void test_vsuxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4(
@@ -130,7 +130,7 @@ void test_vsuxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2(
@@ -139,7 +139,7 @@ void test_vsuxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1(
@@ -148,7 +148,7 @@ void test_vsuxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2(
@@ -157,7 +157,7 @@ void test_vsuxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4(
@@ -166,7 +166,7 @@ void test_vsuxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4(
@@ -175,7 +175,7 @@ void test_vsuxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2(
@@ -184,7 +184,7 @@ void test_vsuxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1(
@@ -193,7 +193,7 @@ void test_vsuxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2(
@@ -202,7 +202,7 @@ void test_vsuxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4(
@@ -211,7 +211,7 @@ void test_vsuxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2(
@@ -238,7 +238,7 @@ void test_vsuxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4(
@@ -247,7 +247,7 @@ void test_vsuxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1(
@@ -256,7 +256,7 @@ void test_vsuxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2(
@@ -265,7 +265,7 @@ void test_vsuxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4(
@@ -274,7 +274,7 @@ void test_vsuxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8(
@@ -283,7 +283,7 @@ void test_vsuxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4(
@@ -292,7 +292,7 @@ void test_vsuxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2(
@@ -301,7 +301,7 @@ void test_vsuxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1(
@@ -310,7 +310,7 @@ void test_vsuxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2(
@@ -319,7 +319,7 @@ void test_vsuxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4(
@@ -328,7 +328,7 @@ void test_vsuxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4(
@@ -337,7 +337,7 @@ void test_vsuxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2(
@@ -346,7 +346,7 @@ void test_vsuxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1(
@@ -355,7 +355,7 @@ void test_vsuxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2(
@@ -364,7 +364,7 @@ void test_vsuxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4(
@@ -373,7 +373,7 @@ void test_vsuxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2(
@@ -382,7 +382,7 @@ void test_vsuxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1(
@@ -391,7 +391,7 @@ void test_vsuxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2(
@@ -400,7 +400,7 @@ void test_vsuxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4(
@@ -409,7 +409,7 @@ void test_vsuxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1(
@@ -418,7 +418,7 @@ void test_vsuxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2(
@@ -427,7 +427,7 @@ void test_vsuxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4(
@@ -436,7 +436,7 @@ void test_vsuxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4_m(
@@ -445,7 +445,7 @@ void test_vsuxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2_m(
@@ -472,7 +472,7 @@ void test_vsuxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4_m(
@@ -481,7 +481,7 @@ void test_vsuxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2_m(
@@ -490,7 +490,7 @@ void test_vsuxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1_m(
@@ -499,7 +499,7 @@ void test_vsuxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2_m(
@@ -508,7 +508,7 @@ void test_vsuxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4_m(
@@ -517,7 +517,7 @@ void test_vsuxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1_m(
@@ -526,7 +526,7 @@ void test_vsuxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2_m(
@@ -535,7 +535,7 @@ void test_vsuxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4_m(
@@ -544,7 +544,7 @@ void test_vsuxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m(
@@ -553,7 +553,7 @@ void test_vsuxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m(
@@ -598,7 +598,7 @@ void test_vsuxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m(
@@ -607,7 +607,7 @@ void test_vsuxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2_m(
@@ -616,7 +616,7 @@ void test_vsuxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m(
@@ -625,7 +625,7 @@ void test_vsuxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m(
@@ -634,7 +634,7 @@ void test_vsuxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m(
@@ -643,7 +643,7 @@ void test_vsuxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m(
@@ -652,7 +652,7 @@ void test_vsuxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m(
@@ -661,7 +661,7 @@ void test_vsuxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m(
@@ -670,7 +670,7 @@ void test_vsuxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m(
@@ -679,7 +679,7 @@ void test_vsuxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m(
@@ -688,7 +688,7 @@ void test_vsuxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m(
@@ -697,7 +697,7 @@ void test_vsuxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m(
@@ -706,7 +706,7 @@ void test_vsuxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m(
@@ -715,7 +715,7 @@ void test_vsuxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m(
@@ -724,7 +724,7 @@ void test_vsuxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m(
@@ -733,7 +733,7 @@ void test_vsuxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m(
@@ -742,7 +742,7 @@ void test_vsuxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m(
@@ -751,7 +751,7 @@ void test_vsuxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m(
@@ -760,7 +760,7 @@ void test_vsuxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m(
@@ -769,7 +769,7 @@ void test_vsuxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m(
@@ -778,7 +778,7 @@ void test_vsuxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m(
@@ -787,7 +787,7 @@ void test_vsuxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m(
@@ -796,7 +796,7 @@ void test_vsuxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m(
@@ -805,7 +805,7 @@ void test_vsuxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m(
@@ -814,7 +814,7 @@ void test_vsuxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m(
@@ -823,7 +823,7 @@ void test_vsuxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m(
@@ -832,7 +832,7 @@ void test_vsuxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m(
@@ -841,7 +841,7 @@ void test_vsuxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m(
@@ -850,7 +850,7 @@ void test_vsuxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m(
@@ -859,7 +859,7 @@ void test_vsuxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m(
@@ -868,6 +868,6 @@ void test_vsuxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c
index 124ab0b6ea08..c4b2cbe7518e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsuxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsuxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsuxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsuxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsuxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsuxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsuxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsuxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsuxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsuxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsuxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsuxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsuxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsuxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsuxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsuxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsuxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsuxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsuxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsuxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsuxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsuxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsuxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsuxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsuxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsuxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsuxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsuxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsuxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsuxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsuxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsuxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsuxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsuxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsuxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsuxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsuxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsuxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsuxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsuxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsuxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsuxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsuxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsuxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsuxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsuxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsuxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsuxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsuxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsuxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsuxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsuxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsuxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsuxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsuxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsuxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsuxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsuxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsuxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsuxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
}
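For reference, a minimal user-side sketch of the renamed overloaded form exercised by the tests above; the wrapper name store_three_fields is illustrative only, and it assumes a toolchain whose <riscv_vector.h> already ships the __riscv_-prefixed overloads (e.g. compiled with -march=rv64gcv):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Scatter three i32 segment fields through 16-bit indices using the
// overloaded intrinsic; the argument list matches the pre-rename
// spelling exactly -- only the __riscv_ prefix is new.
void store_three_fields(int32_t *base, vuint16mf4_t bindex,
                        vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2,
                        size_t vl) {
  __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl);  // was: vsuxseg3ei16(...)
}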
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c
index c4719394476f..cab948ab5a4a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsuxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsuxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsuxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsuxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsuxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsuxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsuxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsuxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsuxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsuxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsuxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsuxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsuxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsuxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsuxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsuxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsuxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsuxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsuxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsuxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsuxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsuxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsuxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsuxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsuxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsuxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsuxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsuxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsuxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsuxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsuxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsuxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsuxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsuxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsuxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsuxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsuxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsuxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsuxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsuxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsuxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsuxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsuxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsuxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsuxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsuxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsuxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsuxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsuxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsuxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsuxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsuxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsuxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsuxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsuxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsuxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsuxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsuxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsuxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsuxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c
index b8aab2d48230..51d243e13c0f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsuxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsuxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsuxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsuxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4(
@@ -130,7 +130,7 @@ void test_vsuxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2(
@@ -139,7 +139,7 @@ void test_vsuxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1(
@@ -148,7 +148,7 @@ void test_vsuxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2(
@@ -157,7 +157,7 @@ void test_vsuxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2(
@@ -166,7 +166,7 @@ void test_vsuxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1(
@@ -175,7 +175,7 @@ void test_vsuxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2(
@@ -184,7 +184,7 @@ void test_vsuxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1(
@@ -193,7 +193,7 @@ void test_vsuxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2(
@@ -202,7 +202,7 @@ void test_vsuxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8(
@@ -211,7 +211,7 @@ void test_vsuxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4(
@@ -220,7 +220,7 @@ void test_vsuxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2(
@@ -229,7 +229,7 @@ void test_vsuxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1(
@@ -238,7 +238,7 @@ void test_vsuxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4(
@@ -247,7 +247,7 @@ void test_vsuxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2(
@@ -256,7 +256,7 @@ void test_vsuxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1(
@@ -265,7 +265,7 @@ void test_vsuxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2(
@@ -274,7 +274,7 @@ void test_vsuxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2(
@@ -283,7 +283,7 @@ void test_vsuxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1(
@@ -292,7 +292,7 @@ void test_vsuxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2(
@@ -301,7 +301,7 @@ void test_vsuxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1(
@@ -310,7 +310,7 @@ void test_vsuxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2(
@@ -319,7 +319,7 @@ void test_vsuxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4_m(
@@ -328,7 +328,7 @@ void test_vsuxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2_m(
@@ -337,7 +337,7 @@ void test_vsuxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1_m(
@@ -346,7 +346,7 @@ void test_vsuxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2_m(
@@ -355,7 +355,7 @@ void test_vsuxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2_m(
@@ -382,7 +382,7 @@ void test_vsuxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1_m(
@@ -391,7 +391,7 @@ void test_vsuxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2_m(
@@ -400,7 +400,7 @@ void test_vsuxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m(
@@ -409,7 +409,7 @@ void test_vsuxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m(
@@ -418,7 +418,7 @@ void test_vsuxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m(
@@ -427,7 +427,7 @@ void test_vsuxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m(
@@ -436,7 +436,7 @@ void test_vsuxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4_m(
@@ -445,7 +445,7 @@ void test_vsuxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m(
@@ -472,7 +472,7 @@ void test_vsuxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m(
@@ -481,7 +481,7 @@ void test_vsuxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m(
@@ -490,7 +490,7 @@ void test_vsuxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m(
@@ -499,7 +499,7 @@ void test_vsuxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m(
@@ -508,7 +508,7 @@ void test_vsuxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m(
@@ -517,7 +517,7 @@ void test_vsuxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m(
@@ -526,7 +526,7 @@ void test_vsuxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m(
@@ -535,7 +535,7 @@ void test_vsuxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m(
@@ -544,7 +544,7 @@ void test_vsuxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m(
@@ -553,7 +553,7 @@ void test_vsuxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m(
@@ -598,7 +598,7 @@ void test_vsuxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m(
@@ -607,7 +607,7 @@ void test_vsuxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m(
@@ -616,7 +616,7 @@ void test_vsuxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m(
@@ -625,7 +625,7 @@ void test_vsuxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m(
@@ -634,6 +634,6 @@ void test_vsuxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}
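A minimal sketch of the masked form, mirroring the _m tests above: the vbool mask leads the argument list and participates in overload resolution exactly as before, with only the __riscv_ prefix new. Assumes <riscv_vector.h> and a V-extension target; the wrapper name store3_f64_masked is hypothetical, while the signature is taken verbatim from test_vsuxseg3ei64_v_f64m1_m above:

#include <riscv_vector.h>

// Masked unordered indexed segment-3 store of three f64m1 registers
// through 64-bit indices; memory at lanes where mask is clear is left
// untouched.
void store3_f64_masked(vbool64_t mask, double *base, vuint64m1_t bindex,
                       vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2,
                       size_t vl) {
  // Before this patch: vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
  __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
}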
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c
index 46c76bd1660a..1e1e5adef091 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsuxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsuxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsuxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsuxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsuxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsuxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsuxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsuxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsuxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsuxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsuxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsuxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsuxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsuxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsuxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsuxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsuxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsuxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsuxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsuxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsuxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsuxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsuxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsuxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsuxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsuxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsuxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsuxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsuxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsuxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsuxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsuxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsuxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsuxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsuxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsuxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsuxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsuxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsuxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsuxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsuxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsuxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsuxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsuxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsuxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsuxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsuxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsuxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsuxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsuxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsuxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsuxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsuxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsuxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsuxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsuxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsuxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsuxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsuxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsuxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+ return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c
index ccf38f3c735e..68ac696975b7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsuxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsuxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsuxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsuxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsuxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsuxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsuxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsuxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsuxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsuxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsuxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsuxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsuxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsuxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsuxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsuxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsuxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsuxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsuxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsuxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsuxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsuxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsuxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsuxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsuxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsuxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsuxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsuxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsuxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsuxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsuxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsuxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsuxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsuxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsuxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsuxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsuxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsuxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsuxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsuxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsuxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsuxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsuxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsuxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsuxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsuxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsuxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsuxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsuxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsuxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsuxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsuxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsuxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsuxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsuxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsuxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsuxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsuxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsuxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsuxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c
index 47c537be0f4b..77f1ab686b87 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsuxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsuxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsuxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsuxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsuxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsuxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsuxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsuxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsuxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsuxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsuxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsuxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsuxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsuxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsuxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsuxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsuxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsuxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsuxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsuxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsuxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsuxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsuxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsuxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsuxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsuxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsuxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsuxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsuxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsuxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsuxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsuxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsuxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsuxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsuxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsuxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsuxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsuxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsuxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsuxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsuxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsuxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsuxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsuxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsuxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsuxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsuxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsuxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsuxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsuxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsuxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsuxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsuxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsuxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsuxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsuxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsuxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsuxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsuxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsuxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
}
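For illustration, a minimal sketch of what this rename means for user code (a hypothetical example, not one of the autogenerated tests above): the overloaded indexed segment-store intrinsic keeps its argument list and only gains the __riscv_ prefix.

#include <riscv_vector.h>

// Store four vint32m1_t segments through 32-bit indices; assumes a
// V-enabled target, e.g. -march=rv64gcv.
void store4_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
                  vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
  // Previously spelled: vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
  __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
}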
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c
index cbfaa78cbc2f..66cfd638d47c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsuxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsuxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsuxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsuxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4(
@@ -130,7 +130,7 @@ void test_vsuxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2(
@@ -139,7 +139,7 @@ void test_vsuxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1(
@@ -148,7 +148,7 @@ void test_vsuxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2(
@@ -157,7 +157,7 @@ void test_vsuxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2(
@@ -166,7 +166,7 @@ void test_vsuxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1(
@@ -175,7 +175,7 @@ void test_vsuxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2(
@@ -184,7 +184,7 @@ void test_vsuxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1(
@@ -193,7 +193,7 @@ void test_vsuxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2(
@@ -202,7 +202,7 @@ void test_vsuxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8(
@@ -211,7 +211,7 @@ void test_vsuxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4(
@@ -220,7 +220,7 @@ void test_vsuxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2(
@@ -229,7 +229,7 @@ void test_vsuxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1(
@@ -238,7 +238,7 @@ void test_vsuxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4(
@@ -247,7 +247,7 @@ void test_vsuxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2(
@@ -256,7 +256,7 @@ void test_vsuxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1(
@@ -265,7 +265,7 @@ void test_vsuxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2(
@@ -274,7 +274,7 @@ void test_vsuxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2(
@@ -283,7 +283,7 @@ void test_vsuxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1(
@@ -292,7 +292,7 @@ void test_vsuxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2(
@@ -301,7 +301,7 @@ void test_vsuxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1(
@@ -310,7 +310,7 @@ void test_vsuxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2(
@@ -319,7 +319,7 @@ void test_vsuxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4_m(
@@ -328,7 +328,7 @@ void test_vsuxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2_m(
@@ -337,7 +337,7 @@ void test_vsuxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1_m(
@@ -346,7 +346,7 @@ void test_vsuxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2_m(
@@ -355,7 +355,7 @@ void test_vsuxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2_m(
@@ -382,7 +382,7 @@ void test_vsuxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1_m(
@@ -391,7 +391,7 @@ void test_vsuxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2_m(
@@ -400,7 +400,7 @@ void test_vsuxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m(
@@ -409,7 +409,7 @@ void test_vsuxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m(
@@ -418,7 +418,7 @@ void test_vsuxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m(
@@ -427,7 +427,7 @@ void test_vsuxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m(
@@ -436,7 +436,7 @@ void test_vsuxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m(
@@ -445,7 +445,7 @@ void test_vsuxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m(
@@ -472,7 +472,7 @@ void test_vsuxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m(
@@ -481,7 +481,7 @@ void test_vsuxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m(
@@ -490,7 +490,7 @@ void test_vsuxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m(
@@ -499,7 +499,7 @@ void test_vsuxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1_m(
@@ -508,7 +508,7 @@ void test_vsuxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m(
@@ -517,7 +517,7 @@ void test_vsuxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m(
@@ -526,7 +526,7 @@ void test_vsuxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m(
@@ -535,7 +535,7 @@ void test_vsuxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m(
@@ -544,7 +544,7 @@ void test_vsuxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m(
@@ -553,7 +553,7 @@ void test_vsuxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m(
@@ -598,7 +598,7 @@ void test_vsuxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m(
@@ -607,7 +607,7 @@ void test_vsuxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m(
@@ -616,7 +616,7 @@ void test_vsuxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m(
@@ -625,7 +625,7 @@ void test_vsuxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m(
@@ -634,6 +634,6 @@ void test_vsuxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}
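Likewise for the masked overloads (again a hedged sketch, not part of this commit's test files): the mask-first signature is unchanged apart from the prefix.

#include <riscv_vector.h>

// Masked store of four vfloat64m1_t segments through 64-bit indices;
// the vbool64_t mask matches the f64m1 element ratio, as in the tests.
void store4_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
                    vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2,
                    vfloat64m1_t v3, size_t vl) {
  // Previously spelled: vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
  __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
}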
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c
index 381b9bd538c7..533499500f9b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2(
@@ -40,7 +40,7 @@ void test_vsuxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2(
@@ -49,7 +49,7 @@ void test_vsuxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1(
@@ -58,7 +58,7 @@ void test_vsuxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2(
@@ -67,7 +67,7 @@ void test_vsuxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1(
@@ -76,7 +76,7 @@ void test_vsuxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2(
@@ -85,7 +85,7 @@ void test_vsuxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8(
@@ -94,7 +94,7 @@ void test_vsuxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1(
@@ -121,7 +121,7 @@ void test_vsuxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2(
@@ -130,7 +130,7 @@ void test_vsuxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4(
@@ -139,7 +139,7 @@ void test_vsuxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2(
@@ -148,7 +148,7 @@ void test_vsuxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1(
@@ -157,7 +157,7 @@ void test_vsuxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2(
@@ -166,7 +166,7 @@ void test_vsuxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1(
@@ -184,7 +184,7 @@ void test_vsuxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2(
@@ -193,7 +193,7 @@ void test_vsuxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1(
@@ -202,7 +202,7 @@ void test_vsuxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2(
@@ -211,7 +211,7 @@ void test_vsuxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8(
@@ -220,7 +220,7 @@ void test_vsuxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4(
@@ -229,7 +229,7 @@ void test_vsuxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2(
@@ -238,7 +238,7 @@ void test_vsuxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1(
@@ -247,7 +247,7 @@ void test_vsuxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2(
@@ -256,7 +256,7 @@ void test_vsuxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4(
@@ -265,7 +265,7 @@ void test_vsuxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2(
@@ -274,7 +274,7 @@ void test_vsuxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1(
@@ -283,7 +283,7 @@ void test_vsuxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2(
@@ -292,7 +292,7 @@ void test_vsuxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2(
@@ -301,7 +301,7 @@ void test_vsuxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1(
@@ -310,7 +310,7 @@ void test_vsuxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2(
@@ -319,7 +319,7 @@ void test_vsuxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1(
@@ -328,7 +328,7 @@ void test_vsuxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2(
@@ -337,7 +337,7 @@ void test_vsuxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4_m(
@@ -346,7 +346,7 @@ void test_vsuxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2_m(
@@ -355,7 +355,7 @@ void test_vsuxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1_m(
@@ -364,7 +364,7 @@ void test_vsuxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2_m(
@@ -373,7 +373,7 @@ void test_vsuxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2_m(
@@ -382,7 +382,7 @@ void test_vsuxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1_m(
@@ -391,7 +391,7 @@ void test_vsuxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2_m(
@@ -400,7 +400,7 @@ void test_vsuxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1_m(
@@ -409,7 +409,7 @@ void test_vsuxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2_m(
@@ -418,7 +418,7 @@ void test_vsuxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m(
@@ -427,7 +427,7 @@ void test_vsuxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4_m(
@@ -436,7 +436,7 @@ void test_vsuxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m(
@@ -445,7 +445,7 @@ void test_vsuxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m(
@@ -454,7 +454,7 @@ void test_vsuxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m(
@@ -463,7 +463,7 @@ void test_vsuxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m(
@@ -472,7 +472,7 @@ void test_vsuxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m(
@@ -481,7 +481,7 @@ void test_vsuxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m(
@@ -490,7 +490,7 @@ void test_vsuxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m(
@@ -499,7 +499,7 @@ void test_vsuxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2_m(
@@ -508,7 +508,7 @@ void test_vsuxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m(
@@ -517,7 +517,7 @@ void test_vsuxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m(
@@ -526,7 +526,7 @@ void test_vsuxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m(
@@ -535,7 +535,7 @@ void test_vsuxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m(
@@ -544,7 +544,7 @@ void test_vsuxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m(
@@ -553,7 +553,7 @@ void test_vsuxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m(
@@ -562,7 +562,7 @@ void test_vsuxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m(
@@ -571,7 +571,7 @@ void test_vsuxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m(
@@ -580,7 +580,7 @@ void test_vsuxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m(
@@ -589,7 +589,7 @@ void test_vsuxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m(
@@ -598,7 +598,7 @@ void test_vsuxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m(
@@ -607,7 +607,7 @@ void test_vsuxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m(
@@ -616,7 +616,7 @@ void test_vsuxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m(
@@ -625,7 +625,7 @@ void test_vsuxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m(
@@ -634,7 +634,7 @@ void test_vsuxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m(
@@ -643,7 +643,7 @@ void test_vsuxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m(
@@ -652,7 +652,7 @@ void test_vsuxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m(
@@ -661,7 +661,7 @@ void test_vsuxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m(
@@ -670,6 +670,6 @@ void test_vsuxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+ return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c
index eb5625dde451..6bbe9efe2661 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c
index eaa33deb40f5..5d20316f15d3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c
index b0524fb88c3c..5097904ffb98 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c
index 0d2cc84c5b59..5e757a32c4a9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+ return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c
index f26a60298d61..5798eea436b9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c
index 68def9929a6e..fa3dbc91b4cb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c
index 9cf1aa629d1a..d513df717a3e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c
index ca24ed128fc4..90744cff36e5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+ return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c
index 4ef9076faac3..85f30a4b871e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c
index 241ff2283ae2..ea0dc505b239 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c
index 43cee9c39d67..35ca8256f985 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c
index f6f61c924cd2..11430143914d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
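(Illustrative note, not part of the patch: every hunk in these autogenerated tests applies the same mechanical rename, adding the `__riscv_` prefix to the overloaded intrinsic call while leaving the argument list untouched. A minimal sketch of the before/after pattern, assuming a hypothetical caller and a toolchain whose <riscv_vector.h> includes this change; the signature mirrors test_vsuxseg7ei8_v_f32mf2_m above.)

// Illustrative only -- not part of the patch.
#include <riscv_vector.h>

void store_seg7_masked(vbool64_t mask, float *base, vuint8mf8_t bindex,
                       vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2,
                       vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5,
                       vfloat32mf2_t v6, size_t vl) {
  // Before this patch the overloaded name had no prefix:
  //   vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
  // After this patch the riscv-c-api-doc naming applies:
  __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}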
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c
index 7805fd265e56..f0904656120a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
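(One detail visible in the hunks above, noted here for readers skimming the diff: a single overloaded name now covers both the unmasked and masked forms, and clang selects between them from the presence of the leading vbool operand. A minimal sketch under that reading, not part of the patch; the wrapper names are hypothetical, while the intrinsic calls mirror test_vsuxseg8ei16_v_u16m1 and test_vsuxseg8ei16_v_u16m1_m above.)

// Illustrative only -- not part of the patch.
#include <riscv_vector.h>

void store_seg8(uint16_t *base, vuint16m1_t bindex,
                vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3,
                vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7,
                size_t vl) {
  // Unmasked form: no leading mask operand.
  __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

void store_seg8_masked(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
                       vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2,
                       vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5,
                       vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
  // Masked form: same overloaded name, selected by the leading vbool16_t.
  __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}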
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c
index 0eb9bf6b57a3..f5636edbe6fa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
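
For reference, a minimal sketch (not part of this patch) of what the migration looks like at an application call site, assuming <riscv_vector.h> from a compiler that implements this naming scheme; the helper name store8_f32 is invented for illustration. The overload is still resolved from the operand types, so only the prefix changes:

#include <riscv_vector.h>

// Indexed segment store of eight fp32 vector fields. After this patch the
// overloaded intrinsic carries the __riscv_ prefix, while the argument
// list (base, bindex, v0..v7, vl) is unchanged.
void store8_f32(float *base, vuint32m1_t bindex,
                vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2,
                vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5,
                vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
  // was: vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
  __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
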
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c
index 253793185a4b..e22a3e725f9e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
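
The masked variants migrate the same way; a hedged sketch under the same assumptions (the helper name store8_f64_masked is invented). The leading vbool operand is what selects the masked overload, and again only the __riscv_ prefix is new at the call site:

#include <riscv_vector.h>

// Masked indexed segment store of eight fp64 vector fields; lanes whose
// mask bit is clear are left untouched in memory.
void store8_f64_masked(vbool64_t mask, double *base, vuint64m1_t bindex,
                       vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2,
                       vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5,
                       vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
  // was: vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
  __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
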
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c
index 339407b5102b..5645235c01f4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2(
@@ -22,7 +22,7 @@ void test_vsuxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1(
@@ -31,7 +31,7 @@ void test_vsuxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2(
@@ -40,7 +40,7 @@ void test_vsuxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1(
@@ -49,7 +49,7 @@ void test_vsuxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1(
@@ -58,7 +58,7 @@ void test_vsuxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8(
@@ -67,7 +67,7 @@ void test_vsuxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4(
@@ -76,7 +76,7 @@ void test_vsuxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2(
@@ -85,7 +85,7 @@ void test_vsuxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1(
@@ -94,7 +94,7 @@ void test_vsuxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4(
@@ -103,7 +103,7 @@ void test_vsuxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2(
@@ -112,7 +112,7 @@ void test_vsuxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1(
@@ -121,7 +121,7 @@ void test_vsuxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2(
@@ -130,7 +130,7 @@ void test_vsuxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1(
@@ -139,7 +139,7 @@ void test_vsuxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1(
@@ -148,7 +148,7 @@ void test_vsuxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8(
@@ -157,7 +157,7 @@ void test_vsuxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4(
@@ -166,7 +166,7 @@ void test_vsuxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2(
@@ -175,7 +175,7 @@ void test_vsuxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1(
@@ -184,7 +184,7 @@ void test_vsuxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4(
@@ -193,7 +193,7 @@ void test_vsuxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2(
@@ -202,7 +202,7 @@ void test_vsuxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1(
@@ -211,7 +211,7 @@ void test_vsuxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2(
@@ -220,7 +220,7 @@ void test_vsuxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1(
@@ -229,7 +229,7 @@ void test_vsuxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1(
@@ -238,7 +238,7 @@ void test_vsuxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4_m(
@@ -247,7 +247,7 @@ void test_vsuxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2_m(
@@ -256,7 +256,7 @@ void test_vsuxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1_m(
@@ -265,7 +265,7 @@ void test_vsuxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2_m(
@@ -274,7 +274,7 @@ void test_vsuxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1_m(
@@ -283,7 +283,7 @@ void test_vsuxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1_m(
@@ -292,7 +292,7 @@ void test_vsuxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m(
@@ -301,7 +301,7 @@ void test_vsuxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m(
@@ -310,7 +310,7 @@ void test_vsuxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m(
@@ -319,7 +319,7 @@ void test_vsuxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m(
@@ -328,7 +328,7 @@ void test_vsuxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m(
@@ -337,7 +337,7 @@ void test_vsuxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m(
@@ -346,7 +346,7 @@ void test_vsuxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m(
@@ -355,7 +355,7 @@ void test_vsuxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m(
@@ -364,7 +364,7 @@ void test_vsuxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m(
@@ -373,7 +373,7 @@ void test_vsuxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m(
@@ -382,7 +382,7 @@ void test_vsuxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m(
@@ -391,7 +391,7 @@ void test_vsuxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m(
@@ -400,7 +400,7 @@ void test_vsuxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m(
@@ -409,7 +409,7 @@ void test_vsuxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m(
@@ -418,7 +418,7 @@ void test_vsuxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m(
@@ -427,7 +427,7 @@ void test_vsuxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m(
@@ -436,7 +436,7 @@ void test_vsuxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m(
@@ -445,7 +445,7 @@ void test_vsuxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m(
@@ -454,7 +454,7 @@ void test_vsuxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m(
@@ -463,7 +463,7 @@ void test_vsuxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m(
@@ -472,6 +472,6 @@ void test_vsuxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
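
For readers skimming the autogenerated hunks above, a minimal caller-side sketch of what the rename means in user code (assuming <riscv_vector.h>; the wrapper name store_segments is hypothetical and this snippet is illustrative, not part of the patch):

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

/* Hypothetical wrapper: before this patch the overloaded indexed segment
   store was spelled vsuxseg8ei8(...); with the prefix it becomes
   __riscv_vsuxseg8ei8(...). The argument order is unchanged. */
static void store_segments(uint16_t *base, vuint8mf2_t bindex,
                           vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2,
                           vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5,
                           vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
  __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
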
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwadd.c
index bcbdbd81ec89..c560b5217784 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwadd.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
@@ -30,7 +30,7 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
@@ -39,7 +39,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
@@ -48,7 +48,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
@@ -57,7 +57,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
@@ -66,7 +66,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
@@ -75,7 +75,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
@@ -84,7 +84,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
@@ -93,7 +93,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
@@ -102,7 +102,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
@@ -111,7 +111,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
@@ -120,7 +120,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
@@ -129,7 +129,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
@@ -138,7 +138,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
@@ -147,7 +147,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
@@ -156,7 +156,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
@@ -165,7 +165,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
@@ -174,7 +174,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
@@ -183,7 +183,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
@@ -192,7 +192,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
@@ -201,7 +201,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
@@ -210,7 +210,7 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
@@ -219,7 +219,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
@@ -228,7 +228,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
@@ -237,7 +237,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
@@ -246,7 +246,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
@@ -282,7 +282,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
@@ -291,7 +291,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
@@ -300,7 +300,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
@@ -309,7 +309,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
@@ -318,7 +318,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
@@ -327,7 +327,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
@@ -336,7 +336,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
@@ -345,7 +345,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
@@ -354,7 +354,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
@@ -363,7 +363,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
@@ -372,7 +372,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
@@ -381,7 +381,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
@@ -390,7 +390,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
@@ -399,7 +399,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
@@ -408,7 +408,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
@@ -417,7 +417,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
@@ -426,7 +426,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
@@ -435,7 +435,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
@@ -444,7 +444,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
@@ -453,7 +453,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
@@ -462,7 +462,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
@@ -471,7 +471,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
@@ -480,7 +480,7 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
@@ -489,7 +489,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
@@ -498,7 +498,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
@@ -507,7 +507,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
@@ -516,7 +516,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_vv(op1, op2, vl);
+ return __riscv_vwadd_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
@@ -525,7 +525,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vwadd_vx(op1, op2, vl);
+ return __riscv_vwadd_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
@@ -534,7 +534,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_wv(op1, op2, vl);
+ return __riscv_vwadd_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
@@ -543,7 +543,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
- return vwadd_wx(op1, op2, vl);
+ return __riscv_vwadd_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m(
@@ -552,7 +552,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m(
@@ -561,7 +561,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m(
@@ -570,7 +570,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m(
@@ -579,7 +579,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m(
@@ -588,7 +588,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m(
@@ -597,7 +597,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m(
@@ -606,7 +606,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m(
@@ -615,7 +615,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m(
@@ -624,7 +624,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m(
@@ -633,7 +633,7 @@ vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m(
@@ -642,7 +642,7 @@ vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m(
@@ -651,7 +651,7 @@ vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m(
@@ -660,7 +660,7 @@ vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m(
@@ -669,7 +669,7 @@ vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m(
@@ -678,7 +678,7 @@ vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m(
@@ -687,7 +687,7 @@ vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m(
@@ -696,7 +696,7 @@ vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m(
@@ -705,7 +705,7 @@ vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m(
@@ -714,7 +714,7 @@ vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m(
@@ -723,7 +723,7 @@ vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m(
@@ -732,7 +732,7 @@ vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m(
@@ -741,7 +741,7 @@ vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m(
@@ -750,7 +750,7 @@ vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m(
@@ -759,7 +759,7 @@ vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
@@ -768,7 +768,7 @@ vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
@@ -777,7 +777,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
@@ -786,7 +786,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
@@ -795,7 +795,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
@@ -804,7 +804,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m(
@@ -813,7 +813,7 @@ vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m(
@@ -822,7 +822,7 @@ vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m(
@@ -831,7 +831,7 @@ vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m(
@@ -840,7 +840,7 @@ vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m(
@@ -849,7 +849,7 @@ vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m(
@@ -858,7 +858,7 @@ vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m(
@@ -867,7 +867,7 @@ vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m(
@@ -876,7 +876,7 @@ vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m(
@@ -885,7 +885,7 @@ vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m(
@@ -894,7 +894,7 @@ vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m(
@@ -903,7 +903,7 @@ vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m(
@@ -912,7 +912,7 @@ vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m(
@@ -921,7 +921,7 @@ vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m(
@@ -930,7 +930,7 @@ vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m(
@@ -939,7 +939,7 @@ vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m(
@@ -948,7 +948,7 @@ vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m(
@@ -957,7 +957,7 @@ vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m(
@@ -966,7 +966,7 @@ vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m(
@@ -975,7 +975,7 @@ vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m(
@@ -984,7 +984,7 @@ vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m(
@@ -993,7 +993,7 @@ vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m(
@@ -1002,7 +1002,7 @@ vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m(
@@ -1011,7 +1011,7 @@ vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m(
@@ -1020,7 +1020,7 @@ vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m(
@@ -1029,7 +1029,7 @@ vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m(
@@ -1038,7 +1038,7 @@ vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m(
@@ -1047,7 +1047,7 @@ vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m(
@@ -1056,7 +1056,7 @@ vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_vv(mask, op1, op2, vl);
+ return __riscv_vwadd_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m(
@@ -1065,7 +1065,7 @@ vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwadd_vx(mask, op1, op2, vl);
+ return __riscv_vwadd_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m(
@@ -1074,7 +1074,7 @@ vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_wv(mask, op1, op2, vl);
+ return __riscv_vwadd_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m(
@@ -1083,6 +1083,6 @@ vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwadd_wx(mask, op1, op2, vl);
+ return __riscv_vwadd_wx(mask, op1, op2, vl);
}
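For reference, the vwadd.c hunks above mechanically prepend `__riscv_` to the overloaded signed widening-add intrinsics. A minimal before/after sketch of a masked `_wv` call (illustrative only; assumes <riscv_vector.h> and the V extension are available; the helper name is hypothetical):

  #include <riscv_vector.h>

  /* vwadd.wv adds a narrow i32 operand into an already-wide i64 operand;
     the vbool32_t mask selects which lanes are computed. Types mirror the
     test_vwadd_wv_i64m2_m case above. */
  vint64m2_t acc_add_m(vbool32_t m, vint64m2_t acc, vint32m1_t x, size_t vl) {
    return __riscv_vwadd_wv(m, acc, x, vl); /* was: vwadd_wv(m, acc, x, vl) */
  }
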
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwaddu.c
index 8b7a500d2849..c2624811e597 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwaddu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwaddu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
@@ -30,7 +30,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
@@ -39,7 +39,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
@@ -48,7 +48,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
@@ -57,7 +57,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
@@ -66,7 +66,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
@@ -75,7 +75,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
@@ -84,7 +84,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
@@ -93,7 +93,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
@@ -102,7 +102,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
@@ -111,7 +111,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
@@ -120,7 +120,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
@@ -129,7 +129,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
@@ -138,7 +138,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
@@ -147,7 +147,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
@@ -156,7 +156,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
@@ -165,7 +165,7 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
@@ -174,7 +174,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
@@ -183,7 +183,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
@@ -192,7 +192,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
@@ -201,7 +201,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
@@ -210,7 +210,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
@@ -219,7 +219,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
@@ -228,7 +228,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
@@ -237,7 +237,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
@@ -246,7 +246,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
@@ -282,7 +282,7 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
@@ -291,7 +291,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
@@ -300,7 +300,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
@@ -309,7 +309,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
@@ -318,7 +318,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
@@ -327,7 +327,7 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
@@ -336,7 +336,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
@@ -345,7 +345,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
@@ -354,7 +354,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
@@ -363,7 +363,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
@@ -372,7 +372,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
@@ -381,7 +381,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
@@ -390,7 +390,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
@@ -399,7 +399,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
@@ -408,7 +408,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
@@ -417,7 +417,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
@@ -426,7 +426,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
@@ -435,7 +435,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
@@ -444,7 +444,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
@@ -453,7 +453,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
@@ -462,7 +462,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
@@ -471,7 +471,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
@@ -480,7 +480,7 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
@@ -489,7 +489,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
@@ -498,7 +498,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
@@ -507,7 +507,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
@@ -516,7 +516,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_vv(op1, op2, vl);
+ return __riscv_vwaddu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
@@ -525,7 +525,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(op1, op2, vl);
+ return __riscv_vwaddu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
@@ -534,7 +534,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_wv(op1, op2, vl);
+ return __riscv_vwaddu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
@@ -543,7 +543,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(op1, op2, vl);
+ return __riscv_vwaddu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m(
@@ -552,7 +552,7 @@ vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m(
@@ -561,7 +561,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m(
@@ -570,7 +570,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m(
@@ -579,7 +579,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m(
@@ -588,7 +588,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m(
@@ -597,7 +597,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m(
@@ -606,7 +606,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m(
@@ -615,7 +615,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m(
@@ -624,7 +624,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m(
@@ -633,7 +633,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m(
@@ -642,7 +642,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m(
@@ -651,7 +651,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m(
@@ -660,7 +660,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m(
@@ -669,7 +669,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m(
@@ -678,7 +678,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m(
@@ -687,7 +687,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m(
@@ -696,7 +696,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m(
@@ -705,7 +705,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m(
@@ -714,7 +714,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m(
@@ -723,7 +723,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m(
@@ -732,7 +732,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m(
@@ -741,7 +741,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m(
@@ -750,7 +750,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m(
@@ -759,7 +759,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m(
@@ -768,7 +768,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m(
@@ -777,7 +777,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m(
@@ -786,7 +786,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m(
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m(
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m(
@@ -813,7 +813,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m(
@@ -822,7 +822,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m(
@@ -831,7 +831,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m(
@@ -840,7 +840,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m(
@@ -849,7 +849,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m(
@@ -858,7 +858,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m(
@@ -867,7 +867,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m(
@@ -876,7 +876,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m(
@@ -885,7 +885,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m(
@@ -894,7 +894,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m(
@@ -903,7 +903,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m(
@@ -912,7 +912,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m(
@@ -921,7 +921,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m(
@@ -930,7 +930,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m(
@@ -939,7 +939,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m(
@@ -948,7 +948,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m(
@@ -957,7 +957,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m(
@@ -966,7 +966,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m(
@@ -975,7 +975,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m(
@@ -984,7 +984,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m(
@@ -993,7 +993,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m(
@@ -1002,7 +1002,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m(
@@ -1011,7 +1011,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m(
@@ -1020,7 +1020,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m(
@@ -1029,7 +1029,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m(
@@ -1038,7 +1038,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m(
@@ -1047,7 +1047,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m(
@@ -1056,7 +1056,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_vv(mask, op1, op2, vl);
+ return __riscv_vwaddu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m(
@@ -1065,7 +1065,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(mask, op1, op2, vl);
+ return __riscv_vwaddu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m(
@@ -1074,7 +1074,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_wv(mask, op1, op2, vl);
+ return __riscv_vwaddu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
@@ -1083,6 +1083,6 @@ vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(mask, op1, op2, vl);
+ return __riscv_vwaddu_wx(mask, op1, op2, vl);
}
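The unsigned widening add follows the same rename, in both its unmasked and masked overloads. A minimal sketch (illustrative only; assumes <riscv_vector.h>; the helper names are hypothetical, with types taken from the u16mf4 tests above):

  #include <riscv_vector.h>

  /* u8 elements widen to u16 on add (LMUL mf8 -> mf4). */
  vuint16mf4_t widen_addu(vuint8mf8_t a, vuint8mf8_t b, size_t vl) {
    return __riscv_vwaddu_vv(a, b, vl); /* was: vwaddu_vv(a, b, vl) */
  }

  /* Masked overload: same name, mask-first argument order as in the tests. */
  vuint16mf4_t widen_addu_m(vbool64_t m, vuint8mf8_t a, vuint8mf8_t b,
                            size_t vl) {
    return __riscv_vwaddu_vv(m, a, b, vl); /* was: vwaddu_vv(m, a, b, vl) */
  }
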
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvt.c
index fb8641badb3b..0d9c9069aea0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvt.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4(vint8mf8_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4(vint8mf8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2(vint8mf4_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
@@ -30,7 +30,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2(vint8mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1(vint8mf2_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
@@ -39,7 +39,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1(vint8mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2(vint8m1_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
@@ -48,7 +48,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2(vint8m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4(vint8m2_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
@@ -57,7 +57,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4(vint8m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8(vint8m4_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
@@ -66,7 +66,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8(vint8m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2(vint16mf4_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
@@ -75,7 +75,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2(vint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1(vint16mf2_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
@@ -84,7 +84,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1(vint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2(vint16m1_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
@@ -93,7 +93,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2(vint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4(vint16m2_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
@@ -102,7 +102,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4(vint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8(vint16m4_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
@@ -111,7 +111,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8(vint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1(vint32mf2_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
@@ -120,7 +120,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1(vint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2(vint32m1_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
@@ -129,7 +129,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2(vint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4(vint32m2_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
@@ -138,7 +138,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4(vint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8(vint32m4_t src, size_t vl) {
- return vwcvt_x(src, vl);
+ return __riscv_vwcvt_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m(
@@ -147,7 +147,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8(vint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m(
@@ -156,7 +156,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m(
@@ -165,7 +165,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m(
@@ -174,7 +174,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m(
@@ -183,7 +183,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m(
@@ -192,7 +192,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m(
@@ -201,7 +201,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m(
@@ -210,7 +210,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m(
@@ -219,7 +219,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m(
@@ -228,7 +228,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m(
@@ -237,7 +237,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m(
@@ -246,7 +246,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m(
@@ -255,7 +255,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m(
@@ -264,7 +264,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m(
@@ -273,6 +273,6 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) {
- return vwcvt_x(mask, src, vl);
+ return __riscv_vwcvt_x(mask, src, vl);
}
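The widening sign-extension convert is renamed the same way. A minimal sketch (illustrative only; assumes <riscv_vector.h>; the helper name is hypothetical, with types from the i16mf4 test above):

  #include <riscv_vector.h>

  /* vwcvt_x sign-extends each i8 element to i16. */
  vint16mf4_t widen_cvt(vint8mf8_t src, size_t vl) {
    return __riscv_vwcvt_x(src, vl); /* was: vwcvt_x(src, vl) */
  }
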
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvtu.c
index 73ddfc01c4fb..dc7a14f53da1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvtu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvtu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4(vuint8mf8_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4(vuint8mf8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2(vuint8mf4_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
@@ -30,7 +30,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2(vuint8mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1(vuint8mf2_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
@@ -39,7 +39,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1(vuint8mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2(vuint8m1_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
@@ -48,7 +48,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2(vuint8m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4(vuint8m2_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
@@ -57,7 +57,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4(vuint8m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8(vuint8m4_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
@@ -66,7 +66,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8(vuint8m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2(vuint16mf4_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
@@ -75,7 +75,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2(vuint16mf4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1(vuint16mf2_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
@@ -84,7 +84,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1(vuint16mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2(vuint16m1_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
@@ -93,7 +93,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2(vuint16m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4(vuint16m2_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
@@ -102,7 +102,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4(vuint16m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8(vuint16m4_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
@@ -111,7 +111,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8(vuint16m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1(vuint32mf2_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
@@ -120,7 +120,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2(vuint32m1_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
@@ -129,7 +129,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2(vuint32m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4(vuint32m2_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
@@ -138,7 +138,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4(vuint32m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8(vuint32m4_t src, size_t vl) {
- return vwcvtu_x(src, vl);
+ return __riscv_vwcvtu_x(src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m(
@@ -147,7 +147,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8(vuint32m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m(
@@ -174,7 +174,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m(
@@ -183,7 +183,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m(
@@ -192,7 +192,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m(
@@ -201,7 +201,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m(
@@ -210,7 +210,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m(
@@ -219,7 +219,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint16mf2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m(
@@ -228,7 +228,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m(
@@ -237,7 +237,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m(
@@ -246,7 +246,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m(
@@ -255,7 +255,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint32mf2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m(
@@ -264,7 +264,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m(
@@ -273,6 +273,6 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
- return vwcvtu_x(mask, src, vl);
+ return __riscv_vwcvtu_x(mask, src, vl);
}
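Aside (editorial, not part of the autogenerated diff): vwcvtu.c exercises the unsigned variant, where the unmasked overloads take only the source vector and the element count vl. A minimal sketch under the same assumptions as above; the helper name widen_u is hypothetical:

#include <riscv_vector.h>

// Hypothetical helper, not from the patch: zero-extending widen from u8
// elements to u16, mirroring test_vwcvtu_x_x_v_u16mf4 above.
vuint16mf4_t widen_u(vuint8mf8_t src, size_t vl) {
  // Old overloaded spelling: vwcvtu_x(src, vl)
  return __riscv_vwcvtu_x(src, vl);
}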
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmacc.c
index 081585e8076d..067fc2af4939 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
@@ -22,7 +22,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
@@ -31,7 +31,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
@@ -40,7 +40,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
@@ -49,7 +49,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
@@ -58,7 +58,7 @@ vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
@@ -67,7 +67,7 @@ vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
@@ -76,7 +76,7 @@ vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
@@ -85,7 +85,7 @@ vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
@@ -94,7 +94,7 @@ vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
@@ -103,7 +103,7 @@ vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
@@ -112,7 +112,7 @@ vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
@@ -121,7 +121,7 @@ vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
@@ -130,7 +130,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
@@ -139,7 +139,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
@@ -148,7 +148,7 @@ vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
@@ -157,7 +157,7 @@ vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
@@ -166,7 +166,7 @@ vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
@@ -175,7 +175,7 @@ vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
@@ -184,7 +184,7 @@ vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
@@ -193,7 +193,7 @@ vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
@@ -202,7 +202,7 @@ vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
@@ -211,7 +211,7 @@ vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
@@ -220,7 +220,7 @@ vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
@@ -229,7 +229,7 @@ vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
@@ -238,7 +238,7 @@ vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
@@ -247,7 +247,7 @@ vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
@@ -256,7 +256,7 @@ vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
@@ -265,7 +265,7 @@ vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vwmacc(vd, vs1, vs2, vl);
+ return __riscv_vwmacc(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
@@ -274,7 +274,7 @@ vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmacc(vd, rs1, vs2, vl);
+ return __riscv_vwmacc(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
@@ -283,7 +283,7 @@ vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
@@ -292,7 +292,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
@@ -301,7 +301,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
@@ -310,7 +310,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
@@ -319,7 +319,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
@@ -328,7 +328,7 @@ vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
@@ -337,7 +337,7 @@ vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
@@ -346,7 +346,7 @@ vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
@@ -355,7 +355,7 @@ vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
@@ -364,7 +364,7 @@ vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
@@ -373,7 +373,7 @@ vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
@@ -382,7 +382,7 @@ vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
@@ -391,7 +391,7 @@ vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
@@ -400,7 +400,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
@@ -409,7 +409,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
@@ -418,7 +418,7 @@ vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
@@ -427,7 +427,7 @@ vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
@@ -436,7 +436,7 @@ vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
@@ -445,7 +445,7 @@ vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
@@ -454,7 +454,7 @@ vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
@@ -463,7 +463,7 @@ vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
@@ -472,7 +472,7 @@ vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
@@ -481,7 +481,7 @@ vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
@@ -490,7 +490,7 @@ vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
@@ -499,7 +499,7 @@ vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
@@ -508,7 +508,7 @@ vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
@@ -517,7 +517,7 @@ vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
@@ -526,7 +526,7 @@ vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
@@ -535,7 +535,7 @@ vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vwmacc(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
@@ -544,6 +544,6 @@ vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmacc(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc(mask, vd, rs1, vs2, vl);
}
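Aside (editorial, not part of the autogenerated diff): the vwmacc.c tests cover both vector-vector (_vv) and vector-scalar (_vx) widening multiply-accumulate; overload resolution picks the form from the argument types, so only the prefix changes at the call site. A minimal sketch under the same assumptions; the helper name wmacc_scalar is hypothetical:

#include <riscv_vector.h>

// Hypothetical helper, not from the patch: vd[i] += (i32)rs1 * (i32)vs2[i],
// mirroring test_vwmacc_vx_i32m1 above.
vint32m1_t wmacc_scalar(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
  // Old overloaded spelling: vwmacc(vd, rs1, vs2, vl)
  return __riscv_vwmacc(vd, rs1, vs2, vl);
}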
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccsu.c
index 6f001c9dbe14..4250e900e47c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccsu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccsu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
@@ -22,7 +22,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
@@ -31,7 +31,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
@@ -40,7 +40,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
@@ -49,7 +49,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
@@ -58,7 +58,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
@@ -67,7 +67,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
@@ -76,7 +76,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
@@ -85,7 +85,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
@@ -94,7 +94,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
@@ -103,7 +103,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
@@ -112,7 +112,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
@@ -121,7 +121,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
@@ -130,7 +130,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
@@ -139,7 +139,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
@@ -148,7 +148,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
@@ -157,7 +157,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
@@ -166,7 +166,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
@@ -175,7 +175,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
@@ -184,7 +184,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
@@ -193,7 +193,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
@@ -202,7 +202,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
@@ -211,7 +211,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
@@ -220,7 +220,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
@@ -229,7 +229,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
@@ -238,7 +238,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
@@ -247,7 +247,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
@@ -256,7 +256,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
@@ -265,7 +265,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
@@ -274,7 +274,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
@@ -283,7 +283,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
@@ -292,7 +292,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
@@ -301,7 +301,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
@@ -310,7 +310,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
@@ -319,7 +319,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
@@ -328,7 +328,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
@@ -337,7 +337,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
@@ -346,7 +346,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
@@ -355,7 +355,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
@@ -364,7 +364,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
@@ -373,7 +373,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
@@ -382,7 +382,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
@@ -391,7 +391,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
@@ -400,7 +400,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
@@ -409,7 +409,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
@@ -418,7 +418,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
@@ -427,7 +427,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
@@ -436,7 +436,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
@@ -445,7 +445,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
@@ -454,7 +454,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
@@ -463,7 +463,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
@@ -472,7 +472,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
@@ -481,7 +481,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
@@ -490,7 +490,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
@@ -499,7 +499,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
@@ -508,7 +508,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
@@ -517,7 +517,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
@@ -526,7 +526,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
@@ -535,7 +535,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
@@ -544,6 +544,6 @@ vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl);
}
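Aside (editorial, not part of the autogenerated diff): vwmaccsu mixes a signed multiplicand with an unsigned one, which is why vs1/rs1 stay signed while vs2 is unsigned in every hunk above. A minimal sketch under the same assumptions; the helper name wmaccsu_masked is hypothetical:

#include <riscv_vector.h>

// Hypothetical helper, not from the patch: masked signed*unsigned widening
// multiply-accumulate, mirroring test_vwmaccsu_vv_i64m1_m above.
vint64m1_t wmaccsu_masked(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1,
                          vuint32mf2_t vs2, size_t vl) {
  // Old overloaded spelling: vwmaccsu(mask, vd, vs1, vs2, vl)
  return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl);
}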
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccu.c
index 4c01f01eaae6..df57641af88d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
@@ -22,7 +22,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
@@ -31,7 +31,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
@@ -40,7 +40,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
@@ -49,7 +49,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
@@ -58,7 +58,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
@@ -67,7 +67,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
@@ -76,7 +76,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
@@ -85,7 +85,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
@@ -94,7 +94,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
@@ -103,7 +103,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
@@ -112,7 +112,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
@@ -121,7 +121,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
@@ -130,7 +130,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
@@ -139,7 +139,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
@@ -148,7 +148,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
@@ -157,7 +157,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
@@ -166,7 +166,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
@@ -175,7 +175,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
@@ -184,7 +184,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
@@ -193,7 +193,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
@@ -202,7 +202,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
@@ -211,7 +211,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
@@ -220,7 +220,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
@@ -229,7 +229,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
@@ -238,7 +238,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
@@ -247,7 +247,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
@@ -256,7 +256,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
@@ -265,7 +265,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
@@ -274,7 +274,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
@@ -283,7 +283,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
@@ -292,7 +292,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
@@ -301,7 +301,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint8_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
@@ -310,7 +310,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
@@ -319,7 +319,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint8_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
@@ -328,7 +328,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
@@ -337,7 +337,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
@@ -346,7 +346,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
@@ -355,7 +355,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
@@ -364,7 +364,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
@@ -373,7 +373,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
@@ -382,7 +382,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
@@ -391,7 +391,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
@@ -400,7 +400,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
@@ -409,7 +409,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
@@ -418,7 +418,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
@@ -427,7 +427,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
@@ -436,7 +436,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
@@ -445,7 +445,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
@@ -454,7 +454,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
@@ -463,7 +463,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
@@ -472,7 +472,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
@@ -481,7 +481,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
@@ -490,7 +490,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
@@ -499,7 +499,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
@@ -508,7 +508,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
@@ -517,7 +517,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
@@ -526,7 +526,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
@@ -535,7 +535,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
@@ -544,6 +544,6 @@ vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
}
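The masked overloads migrate the same way, with the mask still passed first. A minimal sketch for the unsigned widening multiply-accumulate under a mask, again with a hypothetical wrapper name and the assumed <riscv_vector.h> include:

#include <stddef.h>
#include <riscv_vector.h>

/* Hypothetical wrapper: for elements where mask is set,
 * vd[i] += vs1[i] * vs2[i], with the 8-bit unsigned product widened
 * to 16 bits. Only the intrinsic name changes:
 * vwmaccu(mask, ...) becomes __riscv_vwmaccu(mask, ...). */
vuint16mf4_t widen_macc_u_masked(vbool64_t mask, vuint16mf4_t vd,
                                 vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
}
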
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccus.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccus.c
index 9f8ad1b46ffb..31d28e891a9e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccus.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccus.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
@@ -22,7 +22,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
@@ -31,7 +31,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
@@ -40,7 +40,7 @@ vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
@@ -49,7 +49,7 @@ vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
@@ -58,7 +58,7 @@ vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
@@ -67,7 +67,7 @@ vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
@@ -76,7 +76,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
@@ -85,7 +85,7 @@ vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
@@ -94,7 +94,7 @@ vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
@@ -103,7 +103,7 @@ vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
@@ -112,7 +112,7 @@ vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
@@ -121,7 +121,7 @@ vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
@@ -130,7 +130,7 @@ vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
@@ -139,7 +139,7 @@ vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmaccus(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
@@ -148,7 +148,7 @@ vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
@@ -157,7 +157,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
@@ -166,7 +166,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
@@ -175,7 +175,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
@@ -184,7 +184,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
@@ -193,7 +193,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
@@ -202,7 +202,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
@@ -211,7 +211,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
@@ -220,7 +220,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
@@ -229,7 +229,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
@@ -238,7 +238,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
@@ -247,7 +247,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
@@ -256,7 +256,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
@@ -265,7 +265,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
@@ -274,6 +274,6 @@ vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmaccus(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
}
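vwmaccus has only `vx` forms (an unsigned scalar multiplicand against a signed vector), so every call site in this file takes a scalar `rs1`. A sketch of a migrated caller, with a hypothetical wrapper name and assumed headers:

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

/* Hypothetical wrapper: vd[i] += rs1 * vs2[i], with the unsigned-scalar
 * by signed-vector 8-bit product widened to 16 bits; only the
 * __riscv_ prefix on the intrinsic name is new. */
vint16mf4_t widen_macc_us(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
  return __riscv_vwmaccus(vd, rs1, vs2, vl);
}
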
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmul.c
index 645d3d39500c..8fe0c571b3c4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmul.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
@@ -30,7 +30,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
@@ -39,7 +39,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
@@ -48,7 +48,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
@@ -57,7 +57,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
@@ -66,7 +66,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
@@ -75,7 +75,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
@@ -84,7 +84,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
@@ -93,7 +93,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
@@ -102,7 +102,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
@@ -111,7 +111,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
@@ -120,7 +120,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
@@ -129,7 +129,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
@@ -138,7 +138,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
@@ -147,7 +147,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
@@ -156,7 +156,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
@@ -165,7 +165,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
@@ -174,7 +174,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
@@ -183,7 +183,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
@@ -192,7 +192,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
@@ -201,7 +201,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
@@ -210,7 +210,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
@@ -219,7 +219,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
@@ -228,7 +228,7 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
@@ -237,7 +237,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
@@ -246,7 +246,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
@@ -255,7 +255,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
@@ -264,7 +264,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
@@ -273,7 +273,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vwmul(op1, op2, vl);
+ return __riscv_vwmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m(
@@ -282,7 +282,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m(
@@ -291,7 +291,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m(
@@ -300,7 +300,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m(
@@ -309,7 +309,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m(
@@ -318,7 +318,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m(
@@ -327,7 +327,7 @@ vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m(
@@ -336,7 +336,7 @@ vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m(
@@ -345,7 +345,7 @@ vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m(
@@ -354,7 +354,7 @@ vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m(
@@ -363,7 +363,7 @@ vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m(
@@ -372,7 +372,7 @@ vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m(
@@ -381,7 +381,7 @@ vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m(
@@ -390,7 +390,7 @@ vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m(
@@ -399,7 +399,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m(
@@ -408,7 +408,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m(
@@ -417,7 +417,7 @@ vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m(
@@ -426,7 +426,7 @@ vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m(
@@ -435,7 +435,7 @@ vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m(
@@ -444,7 +444,7 @@ vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m(
@@ -453,7 +453,7 @@ vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m(
@@ -462,7 +462,7 @@ vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m(
@@ -471,7 +471,7 @@ vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m(
@@ -480,7 +480,7 @@ vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m(
@@ -489,7 +489,7 @@ vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m(
@@ -498,7 +498,7 @@ vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m(
@@ -507,7 +507,7 @@ vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m(
@@ -516,7 +516,7 @@ vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m(
@@ -525,7 +525,7 @@ vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m(
@@ -534,7 +534,7 @@ vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m(
@@ -543,6 +543,6 @@ vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwmul(mask, op1, op2, vl);
+ return __riscv_vwmul(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulsu.c
index 02b4a99ba3d2..d11f49c81686 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulsu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulsu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
@@ -30,7 +30,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
@@ -39,7 +39,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
@@ -48,7 +48,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
@@ -57,7 +57,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
@@ -66,7 +66,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
@@ -75,7 +75,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
@@ -84,7 +84,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
@@ -93,7 +93,7 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
@@ -102,7 +102,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
@@ -111,7 +111,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
@@ -120,7 +120,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
@@ -129,7 +129,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
@@ -138,7 +138,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
@@ -147,7 +147,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
@@ -156,7 +156,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
@@ -165,7 +165,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
@@ -174,7 +174,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
@@ -183,7 +183,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
@@ -192,7 +192,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
@@ -201,7 +201,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
@@ -210,7 +210,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
@@ -219,7 +219,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
@@ -228,7 +228,7 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
@@ -237,7 +237,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
@@ -246,7 +246,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
@@ -255,7 +255,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
@@ -264,7 +264,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
@@ -273,7 +273,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulsu(op1, op2, vl);
+ return __riscv_vwmulsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m(
@@ -282,7 +282,7 @@ vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m(
@@ -291,7 +291,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m(
@@ -300,7 +300,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m(
@@ -309,7 +309,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m(
@@ -318,7 +318,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m(
@@ -327,7 +327,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m(
@@ -336,7 +336,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m(
@@ -345,7 +345,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m(
@@ -354,7 +354,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m(
@@ -363,7 +363,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m(
@@ -372,7 +372,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m(
@@ -381,7 +381,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m(
@@ -390,7 +390,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m(
@@ -399,7 +399,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m(
@@ -408,7 +408,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m(
@@ -417,7 +417,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m(
@@ -426,7 +426,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m(
@@ -435,7 +435,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m(
@@ -444,7 +444,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m(
@@ -453,7 +453,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m(
@@ -462,7 +462,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m(
@@ -471,7 +471,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m(
@@ -480,7 +480,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m(
@@ -489,7 +489,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m(
@@ -498,7 +498,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m(
@@ -507,7 +507,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m(
@@ -516,7 +516,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m(
@@ -525,7 +525,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m(
@@ -534,7 +534,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m(
@@ -543,6 +543,6 @@ vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulsu(mask, op1, op2, vl);
+ return __riscv_vwmulsu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulu.c
index 54ca317ed720..75e3fbebc031 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
@@ -30,7 +30,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
@@ -39,7 +39,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
@@ -48,7 +48,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
@@ -57,7 +57,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
@@ -66,7 +66,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
@@ -75,7 +75,7 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
@@ -84,7 +84,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
@@ -93,7 +93,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
@@ -102,7 +102,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
@@ -111,7 +111,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
@@ -120,7 +120,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
@@ -129,7 +129,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
@@ -138,7 +138,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
@@ -147,7 +147,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
@@ -156,7 +156,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
@@ -165,7 +165,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
@@ -174,7 +174,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
@@ -183,7 +183,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
@@ -192,7 +192,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
@@ -201,7 +201,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
@@ -210,7 +210,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
@@ -219,7 +219,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
@@ -228,7 +228,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
@@ -237,7 +237,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
@@ -246,7 +246,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
@@ -255,7 +255,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
@@ -264,7 +264,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
@@ -273,7 +273,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulu(op1, op2, vl);
+ return __riscv_vwmulu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m(
@@ -282,7 +282,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m(
@@ -291,7 +291,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m(
@@ -300,7 +300,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m(
@@ -309,7 +309,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m(
@@ -318,7 +318,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m(
@@ -327,7 +327,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m(
@@ -336,7 +336,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m(
@@ -345,7 +345,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m(
@@ -354,7 +354,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m(
@@ -363,7 +363,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m(
@@ -372,7 +372,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m(
@@ -381,7 +381,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m(
@@ -390,7 +390,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m(
@@ -399,7 +399,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m(
@@ -408,7 +408,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m(
@@ -417,7 +417,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m(
@@ -426,7 +426,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m(
@@ -435,7 +435,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m(
@@ -444,7 +444,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m(
@@ -453,7 +453,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m(
@@ -462,7 +462,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m(
@@ -471,7 +471,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m(
@@ -480,7 +480,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m(
@@ -489,7 +489,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m(
@@ -498,7 +498,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m(
@@ -507,7 +507,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m(
@@ -516,7 +516,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m(
@@ -525,7 +525,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m(
@@ -534,7 +534,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m(
@@ -543,6 +543,6 @@ vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulu(mask, op1, op2, vl);
+ return __riscv_vwmulu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c
index d34e0aba766c..d8e451031079 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
@@ -21,7 +21,7 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
@@ -30,7 +30,7 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1(
@@ -39,7 +39,7 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1(
@@ -48,7 +48,7 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1(
@@ -57,7 +57,7 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1(
@@ -66,7 +66,7 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
@@ -75,7 +75,7 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
@@ -84,7 +84,7 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1(
@@ -93,7 +93,7 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1(
@@ -102,7 +102,7 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1(
@@ -111,7 +111,7 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1(
@@ -120,7 +120,7 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
@@ -129,7 +129,7 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1(
@@ -138,7 +138,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1(
@@ -147,7 +147,7 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1(
@@ -156,7 +156,7 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1(
@@ -165,7 +165,7 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(vector, scalar, vl);
+ return __riscv_vwredsum(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
@@ -174,7 +174,7 @@ vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
@@ -183,7 +183,7 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint8mf8_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
@@ -192,7 +192,7 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint8mf4_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
@@ -201,7 +201,7 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint8mf2_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
@@ -210,7 +210,7 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
@@ -219,7 +219,7 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
@@ -228,7 +228,7 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
@@ -237,7 +237,7 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
@@ -246,7 +246,7 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint16mf4_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
@@ -255,7 +255,7 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint16mf2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
@@ -264,7 +264,7 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
@@ -273,7 +273,7 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
@@ -282,7 +282,7 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
@@ -291,7 +291,7 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
@@ -300,7 +300,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint32mf2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
@@ -309,7 +309,7 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
@@ -318,7 +318,7 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
@@ -327,6 +327,6 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum(mask, vector, scalar, vl);
+ return __riscv_vwredsum(mask, vector, scalar, vl);
}
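For reference, a minimal caller-side sketch of the masked, overloaded form exercised in the vwredsum.c tests above (the helper name is illustrative, not part of the patch; the types, argument order, and __riscv_ prefix follow the tests directly):

#include <riscv_vector.h>

// Widening signed reduction-sum, masked form: the mask is the first
// argument of the overloaded call. Only the __riscv_ prefix changes;
// the overloaded argument list is identical before and after the rename.
vint16m1_t sum_i8_masked(vbool8_t mask, vint8m1_t v, vint16m1_t acc, size_t vl) {
  return __riscv_vwredsum(mask, v, acc, vl);
}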
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c
index b006fc5c38a1..dabb465aa138 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
@@ -21,7 +21,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
@@ -30,7 +30,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
@@ -39,7 +39,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
@@ -48,7 +48,7 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
@@ -57,7 +57,7 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
@@ -66,7 +66,7 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
@@ -75,7 +75,7 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
@@ -84,7 +84,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
@@ -93,7 +93,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
@@ -102,7 +102,7 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
@@ -111,7 +111,7 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
@@ -120,7 +120,7 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
@@ -129,7 +129,7 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
@@ -138,7 +138,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scal
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
@@ -147,7 +147,7 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
@@ -156,7 +156,7 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
@@ -165,7 +165,7 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(vector, scalar, vl);
+ return __riscv_vwredsumu(vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
@@ -174,7 +174,7 @@ vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
@@ -183,7 +183,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
@@ -192,7 +192,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
@@ -201,7 +201,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
@@ -210,7 +210,7 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
@@ -219,7 +219,7 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
@@ -228,7 +228,7 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
@@ -237,7 +237,7 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
@@ -246,7 +246,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
@@ -255,7 +255,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
@@ -264,7 +264,7 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
@@ -273,7 +273,7 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
@@ -282,7 +282,7 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
@@ -291,7 +291,7 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
@@ -300,7 +300,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
@@ -309,7 +309,7 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
@@ -318,7 +318,7 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
@@ -327,6 +327,6 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu(mask, vector, scalar, vl);
+ return __riscv_vwredsumu(mask, vector, scalar, vl);
}
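Likewise, a minimal unmasked sketch for the unsigned variant covered above (helper name illustrative; signature taken from the vwredsumu.c tests):

#include <riscv_vector.h>

// Unsigned widening reduction-sum over the first vl elements;
// the overloaded call resolves on the operand types.
vuint16m1_t sum_u8(vuint8m1_t v, vuint16m1_t acc, size_t vl) {
  return __riscv_vwredsumu(v, acc, vl);
}

The vwsub.c changes that follow show the same rename applied to overloaded intrinsics that carry operand-kind suffixes: vwsub_vv, vwsub_vx, vwsub_wv, and vwsub_wx keep their suffixes and gain only the __riscv_ prefix.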
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsub.c
index e4c508d0c907..8b0b7b353740 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
@@ -30,7 +30,7 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
@@ -39,7 +39,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
@@ -48,7 +48,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
@@ -57,7 +57,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
@@ -66,7 +66,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
@@ -75,7 +75,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
@@ -84,7 +84,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
@@ -93,7 +93,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
@@ -102,7 +102,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
@@ -111,7 +111,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
@@ -120,7 +120,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
@@ -129,7 +129,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
@@ -138,7 +138,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
@@ -147,7 +147,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
@@ -156,7 +156,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
@@ -165,7 +165,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
@@ -174,7 +174,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
@@ -183,7 +183,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
@@ -192,7 +192,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
@@ -201,7 +201,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
@@ -210,7 +210,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
@@ -219,7 +219,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
@@ -228,7 +228,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
@@ -237,7 +237,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
@@ -246,7 +246,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
@@ -282,7 +282,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
@@ -291,7 +291,7 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
@@ -300,7 +300,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
@@ -309,7 +309,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
@@ -318,7 +318,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
@@ -327,7 +327,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
@@ -336,7 +336,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
@@ -345,7 +345,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
@@ -354,7 +354,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
@@ -363,7 +363,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8(
@@ -372,7 +372,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8(
@@ -381,7 +381,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8(
@@ -390,7 +390,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8(
@@ -399,7 +399,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1(
@@ -408,7 +408,7 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1(
@@ -417,7 +417,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1(
@@ -426,7 +426,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1(
@@ -435,7 +435,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2(
@@ -444,7 +444,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2(
@@ -453,7 +453,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2(
@@ -462,7 +462,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2(
@@ -471,7 +471,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4(
@@ -480,7 +480,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4(
@@ -489,7 +489,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4(
@@ -498,7 +498,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4(
@@ -507,7 +507,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8(
@@ -516,7 +516,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_vv(op1, op2, vl);
+ return __riscv_vwsub_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8(
@@ -525,7 +525,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
- return vwsub_vx(op1, op2, vl);
+ return __riscv_vwsub_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8(
@@ -534,7 +534,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_wv(op1, op2, vl);
+ return __riscv_vwsub_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8(
@@ -543,7 +543,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
- return vwsub_wx(op1, op2, vl);
+ return __riscv_vwsub_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m(
@@ -552,7 +552,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m(
@@ -561,7 +561,7 @@ vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m(
@@ -570,7 +570,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m(
@@ -579,7 +579,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m(
@@ -588,7 +588,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m(
@@ -597,7 +597,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m(
@@ -606,7 +606,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m(
@@ -615,7 +615,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m(
@@ -624,7 +624,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m(
@@ -633,7 +633,7 @@ vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m(
@@ -642,7 +642,7 @@ vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m(
@@ -651,7 +651,7 @@ vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m(
@@ -660,7 +660,7 @@ vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m(
@@ -669,7 +669,7 @@ vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m(
@@ -678,7 +678,7 @@ vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m(
@@ -687,7 +687,7 @@ vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m(
@@ -696,7 +696,7 @@ vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m(
@@ -705,7 +705,7 @@ vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m(
@@ -714,7 +714,7 @@ vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m(
@@ -723,7 +723,7 @@ vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m(
@@ -732,7 +732,7 @@ vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m(
@@ -741,7 +741,7 @@ vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m(
@@ -750,7 +750,7 @@ vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m(
@@ -759,7 +759,7 @@ vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m(
@@ -768,7 +768,7 @@ vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m(
@@ -777,7 +777,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m(
@@ -786,7 +786,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m(
@@ -795,7 +795,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m(
@@ -804,7 +804,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m(
@@ -813,7 +813,7 @@ vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m(
@@ -822,7 +822,7 @@ vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m(
@@ -831,7 +831,7 @@ vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m(
@@ -840,7 +840,7 @@ vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m(
@@ -849,7 +849,7 @@ vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m(
@@ -858,7 +858,7 @@ vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m(
@@ -867,7 +867,7 @@ vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m(
@@ -876,7 +876,7 @@ vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m(
@@ -885,7 +885,7 @@ vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m(
@@ -894,7 +894,7 @@ vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m(
@@ -903,7 +903,7 @@ vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m(
@@ -912,7 +912,7 @@ vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m(
@@ -921,7 +921,7 @@ vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m(
@@ -930,7 +930,7 @@ vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m(
@@ -939,7 +939,7 @@ vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m(
@@ -948,7 +948,7 @@ vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m(
@@ -957,7 +957,7 @@ vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m(
@@ -966,7 +966,7 @@ vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m(
@@ -975,7 +975,7 @@ vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m(
@@ -984,7 +984,7 @@ vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m(
@@ -993,7 +993,7 @@ vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m(
@@ -1002,7 +1002,7 @@ vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m(
@@ -1011,7 +1011,7 @@ vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m(
@@ -1020,7 +1020,7 @@ vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m(
@@ -1029,7 +1029,7 @@ vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m(
@@ -1038,7 +1038,7 @@ vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m(
@@ -1047,7 +1047,7 @@ vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m(
@@ -1056,7 +1056,7 @@ vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_vv(mask, op1, op2, vl);
+ return __riscv_vwsub_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m(
@@ -1065,7 +1065,7 @@ vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwsub_vx(mask, op1, op2, vl);
+ return __riscv_vwsub_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m(
@@ -1074,7 +1074,7 @@ vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_wv(mask, op1, op2, vl);
+ return __riscv_vwsub_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m(
@@ -1083,6 +1083,6 @@ vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwsub_wx(mask, op1, op2, vl);
+ return __riscv_vwsub_wx(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsubu.c
index d47ab0ae6908..d5834fd01ab2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsubu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsubu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4(
@@ -30,7 +30,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4(
@@ -39,7 +39,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2(
@@ -48,7 +48,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2(
@@ -57,7 +57,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2(
@@ -66,7 +66,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2(
@@ -75,7 +75,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1(
@@ -84,7 +84,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1(
@@ -93,7 +93,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1(
@@ -102,7 +102,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1(
@@ -111,7 +111,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2(
@@ -120,7 +120,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2(
@@ -129,7 +129,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2(
@@ -138,7 +138,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2(
@@ -147,7 +147,7 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4(
@@ -156,7 +156,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4(
@@ -165,7 +165,7 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4(
@@ -174,7 +174,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4(
@@ -183,7 +183,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8(
@@ -192,7 +192,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8(
@@ -201,7 +201,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8(
@@ -210,7 +210,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8(
@@ -219,7 +219,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2(
@@ -228,7 +228,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2(
@@ -237,7 +237,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2(
@@ -246,7 +246,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1(
@@ -282,7 +282,7 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1(
@@ -291,7 +291,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2(
@@ -300,7 +300,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2(
@@ -309,7 +309,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2(
@@ -318,7 +318,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2(
@@ -327,7 +327,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4(
@@ -336,7 +336,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4(
@@ -345,7 +345,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4(
@@ -354,7 +354,7 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4(
@@ -363,7 +363,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8(
@@ -372,7 +372,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8(
@@ -381,7 +381,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8(
@@ -390,7 +390,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8(
@@ -399,7 +399,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1(
@@ -408,7 +408,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1(
@@ -417,7 +417,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1(
@@ -426,7 +426,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
@@ -435,7 +435,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
@@ -444,7 +444,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
@@ -453,7 +453,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
@@ -462,7 +462,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
@@ -471,7 +471,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
@@ -480,7 +480,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
@@ -489,7 +489,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
@@ -498,7 +498,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
@@ -507,7 +507,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
@@ -516,7 +516,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_vv(op1, op2, vl);
+ return __riscv_vwsubu_vv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
@@ -525,7 +525,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx(op1, op2, vl);
+ return __riscv_vwsubu_vx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
@@ -534,7 +534,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_wv(op1, op2, vl);
+ return __riscv_vwsubu_wv(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
@@ -543,7 +543,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx(op1, op2, vl);
+ return __riscv_vwsubu_wx(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m(
@@ -552,7 +552,7 @@ vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m(
@@ -561,7 +561,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m(
@@ -570,7 +570,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m(
@@ -579,7 +579,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m(
@@ -588,7 +588,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m(
@@ -597,7 +597,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m(
@@ -606,7 +606,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m(
@@ -615,7 +615,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m(
@@ -624,7 +624,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m(
@@ -633,7 +633,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m(
@@ -642,7 +642,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m(
@@ -651,7 +651,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m(
@@ -660,7 +660,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m(
@@ -669,7 +669,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m(
@@ -678,7 +678,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m(
@@ -687,7 +687,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m(
@@ -696,7 +696,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m(
@@ -705,7 +705,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m(
@@ -714,7 +714,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m(
@@ -723,7 +723,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m(
@@ -732,7 +732,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m(
@@ -741,7 +741,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m(
@@ -750,7 +750,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m(
@@ -759,7 +759,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m(
@@ -768,7 +768,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m(
@@ -777,7 +777,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m(
@@ -786,7 +786,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m(
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m(
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m(
@@ -813,7 +813,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m(
@@ -822,7 +822,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m(
@@ -831,7 +831,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m(
@@ -840,7 +840,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m(
@@ -849,7 +849,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m(
@@ -858,7 +858,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m(
@@ -867,7 +867,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m(
@@ -876,7 +876,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m(
@@ -885,7 +885,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m(
@@ -894,7 +894,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m(
@@ -903,7 +903,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m(
@@ -912,7 +912,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m(
@@ -921,7 +921,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m(
@@ -930,7 +930,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m(
@@ -939,7 +939,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m(
@@ -948,7 +948,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m(
@@ -957,7 +957,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m(
@@ -966,7 +966,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m(
@@ -975,7 +975,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m(
@@ -984,7 +984,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m(
@@ -993,7 +993,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m(
@@ -1002,7 +1002,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m(
@@ -1011,7 +1011,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m(
@@ -1020,7 +1020,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m(
@@ -1029,7 +1029,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m(
@@ -1038,7 +1038,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m(
@@ -1047,7 +1047,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m(
@@ -1056,7 +1056,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_vv(mask, op1, op2, vl);
+ return __riscv_vwsubu_vv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m(
@@ -1065,7 +1065,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx(mask, op1, op2, vl);
+ return __riscv_vwsubu_vx(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m(
@@ -1074,7 +1074,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_wv(mask, op1, op2, vl);
+ return __riscv_vwsubu_wv(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m(
@@ -1083,6 +1083,6 @@ vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx(mask, op1, op2, vl);
+ return __riscv_vwsubu_wx(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vxor.c
index cd0fcf289cb3..9e3f8bbc7cbf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vxor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
@@ -336,7 +336,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
@@ -345,7 +345,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
@@ -354,7 +354,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
@@ -363,7 +363,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
@@ -372,7 +372,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
@@ -381,7 +381,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
@@ -390,7 +390,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
@@ -399,7 +399,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
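
The hunks below repeat the same mechanical rename for the unsigned-element tests. For orientation, here is a minimal sketch of how the renamed overloaded intrinsic is used outside the test files, assuming <riscv_vector.h> and the post-rename API; the helper name xor_bytes and the e8/m8 configuration are illustrative choices, not part of this patch:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative only: XOR every byte of src with key into dst. Overload
// resolution picks the vector-scalar form, mirroring test_vxor_vx_u8m8.
void xor_bytes(uint8_t *dst, const uint8_t *src, uint8_t key, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m8(n - i);           // lanes this iteration
    vuint8m8_t v = __riscv_vle8_v_u8m8(src + i, vl);  // loads keep their full names
    v = __riscv_vxor(v, key, vl);                     // renamed overloaded call
    __riscv_vse8(dst + i, v, vl);                     // overloaded store
    i += vl;
  }
}

Note that the load keeps its non-overloaded name: its scalar arguments do not determine the result's LMUL, so there is nothing for C overload resolution to dispatch on.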
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
@@ -408,7 +408,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
@@ -471,7 +471,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
@@ -480,7 +480,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
@@ -489,7 +489,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
@@ -498,7 +498,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
@@ -507,7 +507,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
@@ -516,7 +516,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
@@ -525,7 +525,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
@@ -534,7 +534,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
@@ -579,7 +579,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
@@ -588,7 +588,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
@@ -597,7 +597,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
@@ -606,7 +606,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
@@ -615,7 +615,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
@@ -624,7 +624,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
@@ -633,7 +633,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
@@ -642,7 +642,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
@@ -669,7 +669,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
@@ -678,7 +678,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
@@ -687,7 +687,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
@@ -696,7 +696,7 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
@@ -705,7 +705,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
@@ -714,7 +714,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
@@ -723,7 +723,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
@@ -732,7 +732,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
@@ -741,7 +741,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
@@ -750,7 +750,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
@@ -759,7 +759,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
@@ -768,7 +768,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
@@ -777,7 +777,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
@@ -786,7 +786,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
@@ -795,7 +795,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vxor(op1, op2, vl);
+ return __riscv_vxor(op1, op2, vl);
}
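
From here the file covers the masked _m test variants, where the overloaded call gains a leading vbool*_t operand; the rename is otherwise identical. A minimal sketch of the masked form, again assuming the post-rename API; the helper name, the vmsne-derived mask, and the masked store are illustrative, and since inactive lanes of the non-policy masked vxor result are left agnostic, the sketch stores through the same mask:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative only: XOR key into the nonzero bytes of buf, leaving the
// zero bytes untouched by writing back through the same mask.
void xor_nonzero(uint8_t *buf, uint8_t key, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    vuint8m1_t v = __riscv_vle8_v_u8m1(buf + i, vl);
    vbool8_t m = __riscv_vmsne(v, 0, vl);  // overloaded compare yields the mask
    v = __riscv_vxor(m, v, key, vl);       // masked overloaded call, as in the tests
    __riscv_vse8(m, buf + i, v, vl);       // masked store writes active lanes only
    i += vl;
  }
}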
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m(
@@ -804,7 +804,7 @@ vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m(
@@ -813,7 +813,7 @@ vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m(
@@ -822,7 +822,7 @@ vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m(
@@ -831,7 +831,7 @@ vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m(
@@ -840,7 +840,7 @@ vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m(
@@ -849,7 +849,7 @@ vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m(
@@ -858,7 +858,7 @@ vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m(
@@ -867,7 +867,7 @@ vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m(
@@ -876,7 +876,7 @@ vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m(
@@ -885,7 +885,7 @@ vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m(
@@ -894,7 +894,7 @@ vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m(
@@ -903,7 +903,7 @@ vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m(
@@ -912,7 +912,7 @@ vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m(
@@ -921,7 +921,7 @@ vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m(
@@ -930,7 +930,7 @@ vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m(
@@ -939,7 +939,7 @@ vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m(
@@ -948,7 +948,7 @@ vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m(
@@ -957,7 +957,7 @@ vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m(
@@ -966,7 +966,7 @@ vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m(
@@ -975,7 +975,7 @@ vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m(
@@ -984,7 +984,7 @@ vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m(
@@ -993,7 +993,7 @@ vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vxor(mask, op1, op2, vl);
+ return __riscv_vxor(mask, op1, op2, vl);
}
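
The final file in this section, vzext.c, applies the same rename to the widening zero-extension tests. The _vf2/_vf4/_vf8 factor suffix survives in the overloaded names: C overload resolution sees only the source operand, which does not determine the destination width. A minimal usage sketch under the same assumptions as the earlier ones (the helper name widen_u8_to_u16 is illustrative):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative only: zero-extend bytes to 16 bits. e8 at LMUL=1/2 pairs
// with e16 at LMUL=1, matching test_vzext_vf2_u16m1 above.
void widen_u8_to_u16(uint16_t *dst, const uint8_t *src, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8mf2(n - i);
    vuint8mf2_t v8 = __riscv_vle8_v_u8mf2(src + i, vl);
    vuint16m1_t v16 = __riscv_vzext_vf2(v8, vl);  // factor kept in the name
    __riscv_vse16(dst + i, v16, vl);              // overloaded store
    i += vl;
  }
}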
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c
index 64c57dcc75e2..f5a154aebae6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
@@ -30,7 +30,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
@@ -39,7 +39,7 @@ vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
@@ -48,7 +48,7 @@ vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
@@ -57,7 +57,7 @@ vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
@@ -66,7 +66,7 @@ vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
- return vzext_vf4(op1, vl);
+ return __riscv_vzext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
@@ -75,7 +75,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
- return vzext_vf4(op1, vl);
+ return __riscv_vzext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
@@ -84,7 +84,7 @@ vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
- return vzext_vf4(op1, vl);
+ return __riscv_vzext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
@@ -93,7 +93,7 @@ vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
- return vzext_vf4(op1, vl);
+ return __riscv_vzext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
@@ -102,7 +102,7 @@ vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
- return vzext_vf4(op1, vl);
+ return __riscv_vzext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
@@ -111,7 +111,7 @@ vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
- return vzext_vf8(op1, vl);
+ return __riscv_vzext_vf8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
@@ -120,7 +120,7 @@ vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
- return vzext_vf8(op1, vl);
+ return __riscv_vzext_vf8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
@@ -129,7 +129,7 @@ vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
- return vzext_vf8(op1, vl);
+ return __riscv_vzext_vf8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
@@ -138,7 +138,7 @@ vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
- return vzext_vf8(op1, vl);
+ return __riscv_vzext_vf8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
@@ -147,7 +147,7 @@ vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
@@ -156,7 +156,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
@@ -165,7 +165,7 @@ vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
@@ -174,7 +174,7 @@ vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
@@ -183,7 +183,7 @@ vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
@@ -192,7 +192,7 @@ vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
- return vzext_vf4(op1, vl);
+ return __riscv_vzext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
@@ -201,7 +201,7 @@ vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
- return vzext_vf4(op1, vl);
+ return __riscv_vzext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
@@ -210,7 +210,7 @@ vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
- return vzext_vf4(op1, vl);
+ return __riscv_vzext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
@@ -219,7 +219,7 @@ vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
- return vzext_vf4(op1, vl);
+ return __riscv_vzext_vf4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
@@ -228,7 +228,7 @@ vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
@@ -237,7 +237,7 @@ vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
@@ -246,7 +246,7 @@ vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
@@ -255,7 +255,7 @@ vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
- return vzext_vf2(op1, vl);
+ return __riscv_vzext_vf2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m(
@@ -264,7 +264,7 @@ vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m(
@@ -273,7 +273,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m(
@@ -282,7 +282,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m(
@@ -291,7 +291,7 @@ vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m(
@@ -300,7 +300,7 @@ vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m(
@@ -309,7 +309,7 @@ vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint8m4_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
@@ -318,7 +318,7 @@ vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
- return vzext_vf4(mask, op1, vl);
+ return __riscv_vzext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
@@ -327,7 +327,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
- return vzext_vf4(mask, op1, vl);
+ return __riscv_vzext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
@@ -336,7 +336,7 @@ vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
- return vzext_vf4(mask, op1, vl);
+ return __riscv_vzext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
@@ -345,7 +345,7 @@ vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
- return vzext_vf4(mask, op1, vl);
+ return __riscv_vzext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
@@ -354,7 +354,7 @@ vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
- return vzext_vf4(mask, op1, vl);
+ return __riscv_vzext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
@@ -363,7 +363,7 @@ vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
- return vzext_vf8(mask, op1, vl);
+ return __riscv_vzext_vf8(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
@@ -372,7 +372,7 @@ vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
- return vzext_vf8(mask, op1, vl);
+ return __riscv_vzext_vf8(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
@@ -381,7 +381,7 @@ vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
- return vzext_vf8(mask, op1, vl);
+ return __riscv_vzext_vf8(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
@@ -390,7 +390,7 @@ vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
- return vzext_vf8(mask, op1, vl);
+ return __riscv_vzext_vf8(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m(
@@ -399,7 +399,7 @@ vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m(
@@ -408,7 +408,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m(
@@ -417,7 +417,7 @@ vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint16mf2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m(
@@ -426,7 +426,7 @@ vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m(
@@ -435,7 +435,7 @@ vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint16m4_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m(
@@ -444,7 +444,7 @@ vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint16m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
- return vzext_vf4(mask, op1, vl);
+ return __riscv_vzext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m(
@@ -453,7 +453,7 @@ vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
- return vzext_vf4(mask, op1, vl);
+ return __riscv_vzext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m(
@@ -462,7 +462,7 @@ vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
- return vzext_vf4(mask, op1, vl);
+ return __riscv_vzext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m(
@@ -471,7 +471,7 @@ vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
- return vzext_vf4(mask, op1, vl);
+ return __riscv_vzext_vf4(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m(
@@ -480,7 +480,7 @@ vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m(
@@ -489,7 +489,7 @@ vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint32mf2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint32m1_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m(
@@ -498,7 +498,7 @@ vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint32m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint32m2_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m(
@@ -507,6 +507,6 @@ vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint32m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint32m4_t op1, size_t vl) {
- return vzext_vf2(mask, op1, vl);
+ return __riscv_vzext_vf2(mask, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaadd.c
index 98ad1aafad26..693d6c1293e8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaadd.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vaadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vaadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vaadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vaadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vaadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vaadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vaadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vaadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vaadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vaadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vaadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vaadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaadd_mu(mask, maskedoff, op1, op2, vl);
}
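For reference, a minimal usage sketch of the overloaded intrinsics renamed above, assuming a compiler that carries this patch with the V extension enabled (e.g. clang with -march=rv64gcv). The signature of __riscv_vaadd_tumu below is taken verbatim from the i32m1 test above; the wrapper name average_masked and its use are illustrative only, not part of the patch:

#include <riscv_vector.h>

// Masked averaging add with the tail-undisturbed/mask-undisturbed (_tumu)
// policy. Only the overloaded intrinsic name gains the __riscv_ prefix in
// this patch; the argument list (mask, maskedoff, op1, op2, vl) is unchanged.
vint32m1_t average_masked(vbool32_t mask, vint32m1_t maskedoff,
                          vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, vl); // was: vaadd_tumu(...)
}
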
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaaddu.c
index 7ed2f34720c7..afd3068c2a73 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaaddu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaaddu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vaaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vaaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vaaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vaaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vaaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, vl);
}
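For context, a minimal usage sketch of the renamed overloaded API follows; it is not part of the diff. It assumes a toolchain that implements the __riscv_ prefix per riscv-c-api-doc and matches the intrinsics version these tests target, where the fixed-point averaging ops do not yet take an explicit rounding-mode operand. The function and buffer names are illustrative, not taken from the test suite.

// Minimal sketch: averaging unsigned add over two byte buffers using the
// overloaded __riscv_ intrinsics exercised above. Assumes <riscv_vector.h>
// and RVV support in the compiler.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void avg_u8(uint8_t *dst, const uint8_t *a, const uint8_t *b, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e8m1(n);          // elements this iteration
    vuint8m1_t va = __riscv_vle8_v_u8m1(a, vl);  // unit-stride loads
    vuint8m1_t vb = __riscv_vle8_v_u8m1(b, vl);
    // Overload resolution selects vaaddu_vv_u8m1 from the operand types.
    // The _tu/_tum/_tumu/_mu forms in the tests above take the same
    // operands plus maskedoff (and, for the masked variants, a mask).
    vuint8m1_t vr = __riscv_vaaddu(va, vb, vl);
    __riscv_vse8_v_u8m1(dst, vr, vl);            // unit-stride store
    a += vl; b += vl; dst += vl; n -= vl;
  }
}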
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadc.c
index 71d404ca5ec0..1269dfe18b49 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadc.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vadc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vadc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vadc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8_tu(
@@ -408,7 +408,7 @@ vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8_tu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4_tu(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4_tu(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2_tu(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2_tu(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m1_tu(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m1_tu(
@@ -471,7 +471,7 @@ vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m2_tu(
@@ -480,7 +480,7 @@ vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m2_tu(
@@ -489,7 +489,7 @@ vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m4_tu(
@@ -498,7 +498,7 @@ vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m4_tu(
@@ -507,7 +507,7 @@ vuint8m4_t test_vadc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m8_tu(
@@ -516,7 +516,7 @@ vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m8_tu(
@@ -525,7 +525,7 @@ vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4_tu(
@@ -534,7 +534,7 @@ vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4_tu(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2_tu(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2_tu(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m1_tu(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m1_tu(
@@ -579,7 +579,7 @@ vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m2_tu(
@@ -588,7 +588,7 @@ vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m2_tu(
@@ -597,7 +597,7 @@ vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m4_tu(
@@ -606,7 +606,7 @@ vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m4_tu(
@@ -615,7 +615,7 @@ vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m8_tu(
@@ -624,7 +624,7 @@ vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m8_tu(
@@ -633,7 +633,7 @@ vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_tu(
@@ -642,7 +642,7 @@ vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_tu(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m1_tu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m1_tu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m2_tu(
@@ -678,7 +678,7 @@ vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m2_tu(
@@ -687,7 +687,7 @@ vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m4_tu(
@@ -696,7 +696,7 @@ vuint32m2_t test_vadc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m4_tu(
@@ -705,7 +705,7 @@ vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m8_tu(
@@ -714,7 +714,7 @@ vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m8_tu(
@@ -723,7 +723,7 @@ vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m1_tu(
@@ -732,7 +732,7 @@ vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m1_tu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m2_tu(
@@ -750,7 +750,7 @@ vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m2_tu(
@@ -759,7 +759,7 @@ vuint64m2_t test_vadc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m4_tu(
@@ -768,7 +768,7 @@ vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m4_tu(
@@ -777,7 +777,7 @@ vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m8_tu(
@@ -786,7 +786,7 @@ vuint64m4_t test_vadc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m8_tu(
@@ -795,6 +795,6 @@ vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadc_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) {
- return vadc_tu(maskedoff, op1, op2, carryin, vl);
+ return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
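
The vadc hunks follow the same pattern for the tail-undisturbed (_tu) policy: the add-with-carry call keeps the (maskedoff, op1, op2, carryin, vl) argument order and only the name changes. A minimal sketch under the same assumptions (hypothetical wrapper name; signature as exercised above):

#include <riscv_vector.h>

// Add with carry-in under the tail-undisturbed (_tu) policy: tail lanes keep
// the corresponding values from maskedoff.
vint32m1_t add_with_carry(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2,
                          vbool32_t carryin, size_t vl) {
  // Before this patch: return vadc_tu(maskedoff, op1, op2, carryin, vl);
  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
}
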
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadd.c
index 5ac1f3453270..32ae25381527 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vadd.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tu(
@@ -408,7 +408,7 @@ vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_tu(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_tu(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_tu(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_tu(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_tu(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_tu(
@@ -471,7 +471,7 @@ vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_tu(
@@ -480,7 +480,7 @@ vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_tu(
@@ -489,7 +489,7 @@ vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_tu(
@@ -498,7 +498,7 @@ vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_tu(
@@ -507,7 +507,7 @@ vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_tu(
@@ -516,7 +516,7 @@ vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_tu(
@@ -525,7 +525,7 @@ vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_tu(
@@ -534,7 +534,7 @@ vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_tu(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_tu(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_tu(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_tu(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_tu(
@@ -579,7 +579,7 @@ vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_tu(
@@ -588,7 +588,7 @@ vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_tu(
@@ -597,7 +597,7 @@ vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_tu(
@@ -606,7 +606,7 @@ vuint16m2_t test_vadd_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_tu(
@@ -615,7 +615,7 @@ vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_tu(
@@ -624,7 +624,7 @@ vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_tu(
@@ -633,7 +633,7 @@ vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_tu(
@@ -642,7 +642,7 @@ vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_tu(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_tu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_tu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_tu(
@@ -678,7 +678,7 @@ vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_tu(
@@ -687,7 +687,7 @@ vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_tu(
@@ -696,7 +696,7 @@ vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_tu(
@@ -705,7 +705,7 @@ vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_tu(
@@ -714,7 +714,7 @@ vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_tu(
@@ -723,7 +723,7 @@ vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_tu(
@@ -732,7 +732,7 @@ vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_tu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_tu(
@@ -750,7 +750,7 @@ vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_tu(
@@ -759,7 +759,7 @@ vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_tu(
@@ -768,7 +768,7 @@ vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_tu(
@@ -777,7 +777,7 @@ vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_tu(
@@ -786,7 +786,7 @@ vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_tu(
@@ -795,7 +795,7 @@ vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tum(
@@ -804,7 +804,7 @@ vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tum(
@@ -813,7 +813,7 @@ vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_tum(
@@ -822,7 +822,7 @@ vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_tum(
@@ -831,7 +831,7 @@ vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_tum(
@@ -840,7 +840,7 @@ vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_tum(
@@ -849,7 +849,7 @@ vint8mf2_t test_vadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_tum(
@@ -858,7 +858,7 @@ vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_tum(
@@ -867,7 +867,7 @@ vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_tum(
@@ -876,7 +876,7 @@ vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_tum(
@@ -885,7 +885,7 @@ vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_tum(
@@ -894,7 +894,7 @@ vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_tum(
@@ -903,7 +903,7 @@ vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_tum(
@@ -912,7 +912,7 @@ vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_tum(
@@ -921,7 +921,7 @@ vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_tum(
@@ -930,7 +930,7 @@ vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_tum(
@@ -939,7 +939,7 @@ vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_tum(
@@ -948,7 +948,7 @@ vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_tum(
@@ -957,7 +957,7 @@ vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_tum(
@@ -966,7 +966,7 @@ vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_tum(
@@ -975,7 +975,7 @@ vint16m1_t test_vadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_tum(
@@ -984,7 +984,7 @@ vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_tum(
@@ -993,7 +993,7 @@ vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_tum(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_tum(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_tum(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_tum(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_tum(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_tum(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_tum(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_tum(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_tum(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_tum(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_tum(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_tum(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_tum(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_tum(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_tum(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_tum(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_tum(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_tum(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_tum(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_tum(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_tum(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_tum(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tum(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tum(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_tum(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_tum(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_tum(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_tum(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_tum(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_tum(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_tum(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_tum(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_tum(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_tum(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_tum(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_tum(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_tum(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_tum(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_tum(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_tum(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_tum(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_tum(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_tum(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_tum(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_tum(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_tum(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_tum(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_tum(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_tum(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_tum(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_tum(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_tum(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_tum(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_tum(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_tum(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_tum(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_tum(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_tum(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_tum(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_tum(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_tum(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_tum(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_tum(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_tum(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_tum(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_tum(
@@ -1587,7 +1587,7 @@ vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tumu(
@@ -1596,7 +1596,7 @@ vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tumu(
@@ -1605,7 +1605,7 @@ vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_tumu(
@@ -1614,7 +1614,7 @@ vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_tumu(
@@ -1623,7 +1623,7 @@ vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_tumu(
@@ -1632,7 +1632,7 @@ vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_tumu(
@@ -1641,7 +1641,7 @@ vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_tumu(
@@ -1650,7 +1650,7 @@ vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_tumu(
@@ -1659,7 +1659,7 @@ vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_tumu(
@@ -1668,7 +1668,7 @@ vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_tumu(
@@ -1677,7 +1677,7 @@ vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_tumu(
@@ -1686,7 +1686,7 @@ vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_tumu(
@@ -1695,7 +1695,7 @@ vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_tumu(
@@ -1704,7 +1704,7 @@ vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_tumu(
@@ -1713,7 +1713,7 @@ vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_tumu(
@@ -1722,7 +1722,7 @@ vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_tumu(
@@ -1731,7 +1731,7 @@ vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_tumu(
@@ -1740,7 +1740,7 @@ vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_tumu(
@@ -1749,7 +1749,7 @@ vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_tumu(
@@ -1758,7 +1758,7 @@ vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_tumu(
@@ -1767,7 +1767,7 @@ vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_tumu(
@@ -1776,7 +1776,7 @@ vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_tumu(
@@ -1785,7 +1785,7 @@ vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_tumu(
@@ -1794,7 +1794,7 @@ vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_tumu(
@@ -1803,7 +1803,7 @@ vint16m4_t test_vadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_tumu(
@@ -1812,7 +1812,7 @@ vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_tumu(
@@ -1821,7 +1821,7 @@ vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_tumu(
@@ -1830,7 +1830,7 @@ vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_tumu(
@@ -1839,7 +1839,7 @@ vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_tumu(
@@ -1848,7 +1848,7 @@ vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_tumu(
@@ -1857,7 +1857,7 @@ vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_tumu(
@@ -1866,7 +1866,7 @@ vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_tumu(
@@ -1875,7 +1875,7 @@ vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_tumu(
@@ -1884,7 +1884,7 @@ vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_tumu(
@@ -1893,7 +1893,7 @@ vint32m4_t test_vadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_tumu(
@@ -1902,7 +1902,7 @@ vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_tumu(
@@ -1911,7 +1911,7 @@ vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_tumu(
@@ -1920,7 +1920,7 @@ vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_tumu(
@@ -1929,7 +1929,7 @@ vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_tumu(
@@ -1938,7 +1938,7 @@ vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_tumu(
@@ -1947,7 +1947,7 @@ vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_tumu(
@@ -1956,7 +1956,7 @@ vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_tumu(
@@ -1965,7 +1965,7 @@ vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_tumu(
@@ -1974,7 +1974,7 @@ vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_tumu(
@@ -1983,7 +1983,7 @@ vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tumu(
@@ -1992,7 +1992,7 @@ vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tumu(
@@ -2001,7 +2001,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_tumu(
@@ -2010,7 +2010,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_tumu(
@@ -2019,7 +2019,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_tumu(
@@ -2028,7 +2028,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_tumu(
@@ -2037,7 +2037,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_tumu(
@@ -2046,7 +2046,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_tumu(
@@ -2055,7 +2055,7 @@ vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_tumu(
@@ -2064,7 +2064,7 @@ vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_tumu(
@@ -2073,7 +2073,7 @@ vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_tumu(
@@ -2082,7 +2082,7 @@ vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_tumu(
@@ -2091,7 +2091,7 @@ vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_tumu(
@@ -2100,7 +2100,7 @@ vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_tumu(
@@ -2109,7 +2109,7 @@ vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_tumu(
@@ -2118,7 +2118,7 @@ vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_tumu(
@@ -2127,7 +2127,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_tumu(
@@ -2136,7 +2136,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_tumu(
@@ -2145,7 +2145,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_tumu(
@@ -2154,7 +2154,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_tumu(
@@ -2163,7 +2163,7 @@ vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_tumu(
@@ -2172,7 +2172,7 @@ vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_tumu(
@@ -2181,7 +2181,7 @@ vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_tumu(
@@ -2190,7 +2190,7 @@ vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_tumu(
@@ -2199,7 +2199,7 @@ vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_tumu(
@@ -2208,7 +2208,7 @@ vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_tumu(
@@ -2217,7 +2217,7 @@ vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_tumu(
@@ -2226,7 +2226,7 @@ vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_tumu(
@@ -2235,7 +2235,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_tumu(
@@ -2244,7 +2244,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_tumu(
@@ -2253,7 +2253,7 @@ vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_tumu(
@@ -2262,7 +2262,7 @@ vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_tumu(
@@ -2271,7 +2271,7 @@ vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_tumu(
@@ -2280,7 +2280,7 @@ vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_tumu(
@@ -2289,7 +2289,7 @@ vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_tumu(
@@ -2298,7 +2298,7 @@ vuint32m4_t test_vadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_tumu(
@@ -2307,7 +2307,7 @@ vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_tumu(
@@ -2316,7 +2316,7 @@ vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_tumu(
@@ -2325,7 +2325,7 @@ vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_tumu(
@@ -2334,7 +2334,7 @@ vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_tumu(
@@ -2343,7 +2343,7 @@ vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_tumu(
@@ -2352,7 +2352,7 @@ vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_tumu(
@@ -2361,7 +2361,7 @@ vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_tumu(
@@ -2370,7 +2370,7 @@ vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_tumu(
@@ -2379,7 +2379,7 @@ vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl);
}
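// ---- Editor's note (sketch, not part of the autogenerated diff) ----
// The overloaded `__riscv_vadd_tumu` used throughout the hunks above resolves
// on the operand types, so one spelling covers every element width and LMUL in
// this file. The `_tumu` suffix selects the tail-undisturbed, mask-undisturbed
// policy: both tail elements and masked-off elements keep the values supplied
// by `maskedoff`. A hypothetical caller (`add_masked` is illustrative only),
// assuming <riscv_vector.h> from a toolchain that already carries the
// `__riscv_` prefix, might look like:
//
//   #include <riscv_vector.h>
//
//   vint32m1_t add_masked(vbool32_t mask, vint32m1_t maskedoff,
//                         vint32m1_t a, vint32m1_t b, size_t vl) {
//     // Overloaded form; per the rvv-intrinsic naming scheme the fully
//     // qualified equivalent would be __riscv_vadd_vv_i32m1_tumu(...).
//     return __riscv_vadd_tumu(mask, maskedoff, a, b, vl);
//   }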
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_mu(
@@ -2388,7 +2388,7 @@ vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_mu(
@@ -2397,7 +2397,7 @@ vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_mu(
@@ -2406,7 +2406,7 @@ vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_mu(
@@ -2415,7 +2415,7 @@ vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_mu(
@@ -2424,7 +2424,7 @@ vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_mu(
@@ -2433,7 +2433,7 @@ vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_mu(
@@ -2442,7 +2442,7 @@ vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_mu(
@@ -2451,7 +2451,7 @@ vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_mu(
@@ -2460,7 +2460,7 @@ vint8m1_t test_vadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_mu(
@@ -2469,7 +2469,7 @@ vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_mu(
@@ -2478,7 +2478,7 @@ vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_mu(
@@ -2487,7 +2487,7 @@ vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_mu(
@@ -2496,7 +2496,7 @@ vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_mu(
@@ -2505,7 +2505,7 @@ vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_mu(
@@ -2514,7 +2514,7 @@ vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_mu(
@@ -2523,7 +2523,7 @@ vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_mu(
@@ -2532,7 +2532,7 @@ vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_mu(
@@ -2541,7 +2541,7 @@ vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_mu(
@@ -2550,7 +2550,7 @@ vint16mf2_t test_vadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_mu(
@@ -2559,7 +2559,7 @@ vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_mu(
@@ -2568,7 +2568,7 @@ vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_mu(
@@ -2577,7 +2577,7 @@ vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_mu(
@@ -2586,7 +2586,7 @@ vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_mu(
@@ -2595,7 +2595,7 @@ vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_mu(
@@ -2604,7 +2604,7 @@ vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_mu(
@@ -2613,7 +2613,7 @@ vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_mu(
@@ -2622,7 +2622,7 @@ vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_mu(
@@ -2631,7 +2631,7 @@ vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_mu(
@@ -2640,7 +2640,7 @@ vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_mu(
@@ -2649,7 +2649,7 @@ vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_mu(
@@ -2658,7 +2658,7 @@ vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_mu(
@@ -2667,7 +2667,7 @@ vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_mu(
@@ -2676,7 +2676,7 @@ vint32m2_t test_vadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_mu(
@@ -2685,7 +2685,7 @@ vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_mu(
@@ -2694,7 +2694,7 @@ vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_mu(
@@ -2703,7 +2703,7 @@ vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_mu(
@@ -2712,7 +2712,7 @@ vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_mu(
@@ -2721,7 +2721,7 @@ vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_mu(
@@ -2730,7 +2730,7 @@ vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_mu(
@@ -2739,7 +2739,7 @@ vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_mu(
@@ -2748,7 +2748,7 @@ vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_mu(
@@ -2757,7 +2757,7 @@ vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_mu(
@@ -2766,7 +2766,7 @@ vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_mu(
@@ -2775,7 +2775,7 @@ vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
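// ---- Editor's note (sketch, not part of the autogenerated diff) ----
// The `_mu` variants in the surrounding hunks differ from `_tumu` only in tail
// policy: masked-off elements still take their values from `maskedoff`, while
// tail elements are agnostic. Same assumptions as the sketch above; `add_mu`
// is a hypothetical helper:
//
//   vuint32m1_t add_mu(vbool32_t mask, vuint32m1_t maskedoff,
//                      vuint32m1_t a, vuint32m1_t b, size_t vl) {
//     return __riscv_vadd_mu(mask, maskedoff, a, b, vl);
//   }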
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_mu(
@@ -2784,7 +2784,7 @@ vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_mu(
@@ -2793,7 +2793,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_mu(
@@ -2802,7 +2802,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_mu(
@@ -2811,7 +2811,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_mu(
@@ -2820,7 +2820,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_mu(
@@ -2829,7 +2829,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_mu(
@@ -2838,7 +2838,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_mu(
@@ -2847,7 +2847,7 @@ vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_mu(
@@ -2856,7 +2856,7 @@ vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_mu(
@@ -2865,7 +2865,7 @@ vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_mu(
@@ -2874,7 +2874,7 @@ vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_mu(
@@ -2883,7 +2883,7 @@ vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_mu(
@@ -2892,7 +2892,7 @@ vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_mu(
@@ -2901,7 +2901,7 @@ vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_mu(
@@ -2910,7 +2910,7 @@ vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_mu(
@@ -2919,7 +2919,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_mu(
@@ -2928,7 +2928,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_mu(
@@ -2937,7 +2937,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_mu(
@@ -2946,7 +2946,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_mu(
@@ -2955,7 +2955,7 @@ vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_mu(
@@ -2964,7 +2964,7 @@ vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_mu(
@@ -2973,7 +2973,7 @@ vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_mu(
@@ -2982,7 +2982,7 @@ vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_mu(
@@ -2991,7 +2991,7 @@ vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_mu(
@@ -3000,7 +3000,7 @@ vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_mu(
@@ -3009,7 +3009,7 @@ vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_mu(
@@ -3018,7 +3018,7 @@ vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_mu(
@@ -3027,7 +3027,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_mu(
@@ -3036,7 +3036,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_mu(
@@ -3045,7 +3045,7 @@ vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_mu(
@@ -3054,7 +3054,7 @@ vuint32m1_t test_vadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_mu(
@@ -3063,7 +3063,7 @@ vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_mu(
@@ -3072,7 +3072,7 @@ vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_mu(
@@ -3081,7 +3081,7 @@ vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_mu(
@@ -3090,7 +3090,7 @@ vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_mu(
@@ -3099,7 +3099,7 @@ vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_mu(
@@ -3108,7 +3108,7 @@ vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_mu(
@@ -3117,7 +3117,7 @@ vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_mu(
@@ -3126,7 +3126,7 @@ vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_mu(
@@ -3135,7 +3135,7 @@ vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_mu(
@@ -3144,7 +3144,7 @@ vuint64m2_t test_vadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_mu(
@@ -3153,7 +3153,7 @@ vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_mu(
@@ -3162,7 +3162,7 @@ vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_mu(
@@ -3171,6 +3171,6 @@ vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
}
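(Illustrative sketch, not part of the patch.) The hunks above rewrite the autogenerated vadd tests to the prefixed overloaded spelling; user code follows the same mechanical pattern. Assuming a toolchain carrying this change and the standard <riscv_vector.h> header, a hypothetical caller of the mask-undisturbed (_mu) overload would now read:

    #include <riscv_vector.h>

    /* Hypothetical helper mirroring the test signatures above; the
       overloaded __riscv_vadd_mu resolves on its operand types. */
    vint32m1_t add_masked(vbool32_t mask, vint32m1_t maskedoff,
                          vint32m1_t op1, vint32m1_t op2, size_t vl) {
      return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
    }

As the updated tests indicate, the unprefixed spelling (vadd_mu) is replaced by the __riscv_-prefixed one throughout.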
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vand.c
index 80a12b3478c8..6071f4e8f447 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vand.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vand_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_tu(
@@ -408,7 +408,7 @@ vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_tu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_tu(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_tu(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_tu(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_tu(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m1_tu(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m1_tu(
@@ -471,7 +471,7 @@ vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m2_tu(
@@ -480,7 +480,7 @@ vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m2_tu(
@@ -489,7 +489,7 @@ vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m4_tu(
@@ -498,7 +498,7 @@ vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m4_tu(
@@ -507,7 +507,7 @@ vuint8m4_t test_vand_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m8_tu(
@@ -516,7 +516,7 @@ vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m8_tu(
@@ -525,7 +525,7 @@ vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_tu(
@@ -534,7 +534,7 @@ vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_tu(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_tu(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_tu(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m1_tu(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m1_tu(
@@ -579,7 +579,7 @@ vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m2_tu(
@@ -588,7 +588,7 @@ vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m2_tu(
@@ -597,7 +597,7 @@ vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m4_tu(
@@ -606,7 +606,7 @@ vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m4_tu(
@@ -615,7 +615,7 @@ vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m8_tu(
@@ -624,7 +624,7 @@ vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m8_tu(
@@ -633,7 +633,7 @@ vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tu(
@@ -642,7 +642,7 @@ vuint16m8_t test_vand_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tu(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m1_tu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m1_tu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m2_tu(
@@ -678,7 +678,7 @@ vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m2_tu(
@@ -687,7 +687,7 @@ vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m4_tu(
@@ -696,7 +696,7 @@ vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m4_tu(
@@ -705,7 +705,7 @@ vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m8_tu(
@@ -714,7 +714,7 @@ vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m8_tu(
@@ -723,7 +723,7 @@ vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m1_tu(
@@ -732,7 +732,7 @@ vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m1_tu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m2_tu(
@@ -750,7 +750,7 @@ vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m2_tu(
@@ -759,7 +759,7 @@ vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m4_tu(
@@ -768,7 +768,7 @@ vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m4_tu(
@@ -777,7 +777,7 @@ vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m8_tu(
@@ -786,7 +786,7 @@ vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m8_tu(
@@ -795,7 +795,7 @@ vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vand_tu(maskedoff, op1, op2, vl);
+ return __riscv_vand_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_tum(
@@ -804,7 +804,7 @@ vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_tum(
@@ -813,7 +813,7 @@ vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_tum(
@@ -822,7 +822,7 @@ vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_tum(
@@ -831,7 +831,7 @@ vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_tum(
@@ -840,7 +840,7 @@ vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_tum(
@@ -849,7 +849,7 @@ vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m1_tum(
@@ -858,7 +858,7 @@ vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m1_tum(
@@ -867,7 +867,7 @@ vint8m1_t test_vand_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m2_tum(
@@ -876,7 +876,7 @@ vint8m1_t test_vand_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m2_tum(
@@ -885,7 +885,7 @@ vint8m2_t test_vand_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m4_tum(
@@ -894,7 +894,7 @@ vint8m2_t test_vand_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m4_tum(
@@ -903,7 +903,7 @@ vint8m4_t test_vand_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m8_tum(
@@ -912,7 +912,7 @@ vint8m4_t test_vand_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m8_tum(
@@ -921,7 +921,7 @@ vint8m8_t test_vand_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_tum(
@@ -930,7 +930,7 @@ vint8m8_t test_vand_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_tum(
@@ -939,7 +939,7 @@ vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_tum(
@@ -948,7 +948,7 @@ vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_tum(
@@ -957,7 +957,7 @@ vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m1_tum(
@@ -966,7 +966,7 @@ vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m1_tum(
@@ -975,7 +975,7 @@ vint16m1_t test_vand_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m2_tum(
@@ -984,7 +984,7 @@ vint16m1_t test_vand_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m2_tum(
@@ -993,7 +993,7 @@ vint16m2_t test_vand_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m4_tum(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vand_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m4_tum(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vand_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m8_tum(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vand_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m8_tum(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vand_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tum(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vand_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tum(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m1_tum(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m1_tum(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vand_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m2_tum(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vand_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m2_tum(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vand_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m4_tum(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vand_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m4_tum(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vand_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m8_tum(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vand_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m8_tum(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vand_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m1_tum(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vand_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m1_tum(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vand_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m2_tum(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vand_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m2_tum(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vand_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m4_tum(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vand_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m4_tum(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vand_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m8_tum(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vand_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m8_tum(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vand_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_tum(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vand_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_tum(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_tum(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_tum(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_tum(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_tum(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m1_tum(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m1_tum(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m2_tum(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m2_tum(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vand_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m4_tum(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m4_tum(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m8_tum(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m8_tum(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_tum(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_tum(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_tum(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_tum(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vand_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m1_tum(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m1_tum(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m2_tum(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m2_tum(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m4_tum(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m4_tum(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m8_tum(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m8_tum(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tum(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tum(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m1_tum(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m1_tum(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vand_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m2_tum(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m2_tum(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m4_tum(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vand_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m4_tum(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m8_tum(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m8_tum(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m1_tum(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m1_tum(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m2_tum(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m2_tum(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m4_tum(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m4_tum(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m8_tum(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m8_tum(
@@ -1587,7 +1587,7 @@ vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vand_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_tumu(
@@ -1596,7 +1596,7 @@ vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_tumu(
@@ -1605,7 +1605,7 @@ vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_tumu(
@@ -1614,7 +1614,7 @@ vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_tumu(
@@ -1623,7 +1623,7 @@ vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_tumu(
@@ -1632,7 +1632,7 @@ vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_tumu(
@@ -1641,7 +1641,7 @@ vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m1_tumu(
@@ -1650,7 +1650,7 @@ vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m1_tumu(
@@ -1659,7 +1659,7 @@ vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m2_tumu(
@@ -1668,7 +1668,7 @@ vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m2_tumu(
@@ -1677,7 +1677,7 @@ vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m4_tumu(
@@ -1686,7 +1686,7 @@ vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m4_tumu(
@@ -1695,7 +1695,7 @@ vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m8_tumu(
@@ -1704,7 +1704,7 @@ vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m8_tumu(
@@ -1713,7 +1713,7 @@ vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_tumu(
@@ -1722,7 +1722,7 @@ vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_tumu(
@@ -1731,7 +1731,7 @@ vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_tumu(
@@ -1740,7 +1740,7 @@ vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_tumu(
@@ -1749,7 +1749,7 @@ vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m1_tumu(
@@ -1758,7 +1758,7 @@ vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m1_tumu(
@@ -1767,7 +1767,7 @@ vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m2_tumu(
@@ -1776,7 +1776,7 @@ vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m2_tumu(
@@ -1785,7 +1785,7 @@ vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m4_tumu(
@@ -1794,7 +1794,7 @@ vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m4_tumu(
@@ -1803,7 +1803,7 @@ vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m8_tumu(
@@ -1812,7 +1812,7 @@ vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m8_tumu(
@@ -1821,7 +1821,7 @@ vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tumu(
@@ -1830,7 +1830,7 @@ vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tumu(
@@ -1839,7 +1839,7 @@ vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m1_tumu(
@@ -1848,7 +1848,7 @@ vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m1_tumu(
@@ -1857,7 +1857,7 @@ vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m2_tumu(
@@ -1866,7 +1866,7 @@ vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m2_tumu(
@@ -1875,7 +1875,7 @@ vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m4_tumu(
@@ -1884,7 +1884,7 @@ vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m4_tumu(
@@ -1893,7 +1893,7 @@ vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m8_tumu(
@@ -1902,7 +1902,7 @@ vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m8_tumu(
@@ -1911,7 +1911,7 @@ vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m1_tumu(
@@ -1920,7 +1920,7 @@ vint32m8_t test_vand_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m1_tumu(
@@ -1929,7 +1929,7 @@ vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m2_tumu(
@@ -1938,7 +1938,7 @@ vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m2_tumu(
@@ -1947,7 +1947,7 @@ vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m4_tumu(
@@ -1956,7 +1956,7 @@ vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m4_tumu(
@@ -1965,7 +1965,7 @@ vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m8_tumu(
@@ -1974,7 +1974,7 @@ vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m8_tumu(
@@ -1983,7 +1983,7 @@ vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_tumu(
@@ -1992,7 +1992,7 @@ vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_tumu(
@@ -2001,7 +2001,7 @@ vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_tumu(
@@ -2010,7 +2010,7 @@ vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_tumu(
@@ -2019,7 +2019,7 @@ vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_tumu(
@@ -2028,7 +2028,7 @@ vuint8mf4_t test_vand_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_tumu(
@@ -2037,7 +2037,7 @@ vuint8mf2_t test_vand_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m1_tumu(
@@ -2046,7 +2046,7 @@ vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m1_tumu(
@@ -2055,7 +2055,7 @@ vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m2_tumu(
@@ -2064,7 +2064,7 @@ vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m2_tumu(
@@ -2073,7 +2073,7 @@ vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m4_tumu(
@@ -2082,7 +2082,7 @@ vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m4_tumu(
@@ -2091,7 +2091,7 @@ vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m8_tumu(
@@ -2100,7 +2100,7 @@ vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m8_tumu(
@@ -2109,7 +2109,7 @@ vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_tumu(
@@ -2118,7 +2118,7 @@ vuint8m8_t test_vand_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_tumu(
@@ -2127,7 +2127,7 @@ vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_tumu(
@@ -2136,7 +2136,7 @@ vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_tumu(
@@ -2145,7 +2145,7 @@ vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m1_tumu(
@@ -2154,7 +2154,7 @@ vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m1_tumu(
@@ -2163,7 +2163,7 @@ vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m2_tumu(
@@ -2172,7 +2172,7 @@ vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m2_tumu(
@@ -2181,7 +2181,7 @@ vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m4_tumu(
@@ -2190,7 +2190,7 @@ vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m4_tumu(
@@ -2199,7 +2199,7 @@ vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m8_tumu(
@@ -2208,7 +2208,7 @@ vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m8_tumu(
@@ -2217,7 +2217,7 @@ vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tumu(
@@ -2226,7 +2226,7 @@ vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tumu(
@@ -2235,7 +2235,7 @@ vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m1_tumu(
@@ -2244,7 +2244,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m1_tumu(
@@ -2253,7 +2253,7 @@ vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m2_tumu(
@@ -2262,7 +2262,7 @@ vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m2_tumu(
@@ -2271,7 +2271,7 @@ vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m4_tumu(
@@ -2280,7 +2280,7 @@ vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m4_tumu(
@@ -2289,7 +2289,7 @@ vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m8_tumu(
@@ -2298,7 +2298,7 @@ vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m8_tumu(
@@ -2307,7 +2307,7 @@ vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m1_tumu(
@@ -2316,7 +2316,7 @@ vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m1_tumu(
@@ -2325,7 +2325,7 @@ vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m2_tumu(
@@ -2334,7 +2334,7 @@ vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m2_tumu(
@@ -2343,7 +2343,7 @@ vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m4_tumu(
@@ -2352,7 +2352,7 @@ vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m4_tumu(
@@ -2361,7 +2361,7 @@ vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m8_tumu(
@@ -2370,7 +2370,7 @@ vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m8_tumu(
@@ -2379,7 +2379,7 @@ vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vand_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_mu(
@@ -2388,7 +2388,7 @@ vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_mu(
@@ -2397,7 +2397,7 @@ vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vand_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_mu(
@@ -2406,7 +2406,7 @@ vint8mf8_t test_vand_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_mu(
@@ -2415,7 +2415,7 @@ vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_mu(
@@ -2424,7 +2424,7 @@ vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_mu(
@@ -2433,7 +2433,7 @@ vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vand_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m1_mu(
@@ -2442,7 +2442,7 @@ vint8mf2_t test_vand_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m1_mu(
@@ -2451,7 +2451,7 @@ vint8m1_t test_vand_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vand_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m2_mu(
@@ -2460,7 +2460,7 @@ vint8m1_t test_vand_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m2_mu(
@@ -2469,7 +2469,7 @@ vint8m2_t test_vand_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vand_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m4_mu(
@@ -2478,7 +2478,7 @@ vint8m2_t test_vand_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m4_mu(
@@ -2487,7 +2487,7 @@ vint8m4_t test_vand_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vand_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i8m8_mu(
@@ -2496,7 +2496,7 @@ vint8m4_t test_vand_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i8m8_mu(
@@ -2505,7 +2505,7 @@ vint8m8_t test_vand_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vand_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_mu(
@@ -2514,7 +2514,7 @@ vint8m8_t test_vand_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_mu(
@@ -2523,7 +2523,7 @@ vint16mf4_t test_vand_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_mu(
@@ -2532,7 +2532,7 @@ vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_mu(
@@ -2541,7 +2541,7 @@ vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m1_mu(
@@ -2550,7 +2550,7 @@ vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m1_mu(
@@ -2559,7 +2559,7 @@ vint16m1_t test_vand_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vand_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m2_mu(
@@ -2568,7 +2568,7 @@ vint16m1_t test_vand_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m2_mu(
@@ -2577,7 +2577,7 @@ vint16m2_t test_vand_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vand_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m4_mu(
@@ -2586,7 +2586,7 @@ vint16m2_t test_vand_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m4_mu(
@@ -2595,7 +2595,7 @@ vint16m4_t test_vand_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vand_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i16m8_mu(
@@ -2604,7 +2604,7 @@ vint16m4_t test_vand_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i16m8_mu(
@@ -2613,7 +2613,7 @@ vint16m8_t test_vand_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vand_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_mu(
@@ -2622,7 +2622,7 @@ vint16m8_t test_vand_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_mu(
@@ -2631,7 +2631,7 @@ vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m1_mu(
@@ -2640,7 +2640,7 @@ vint32mf2_t test_vand_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m1_mu(
@@ -2649,7 +2649,7 @@ vint32m1_t test_vand_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vand_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m2_mu(
@@ -2658,7 +2658,7 @@ vint32m1_t test_vand_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m2_mu(
@@ -2667,7 +2667,7 @@ vint32m2_t test_vand_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vand_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m4_mu(
@@ -2676,7 +2676,7 @@ vint32m2_t test_vand_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m4_mu(
@@ -2685,7 +2685,7 @@ vint32m4_t test_vand_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vand_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i32m8_mu(
@@ -2694,7 +2694,7 @@ vint32m4_t test_vand_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i32m8_mu(
@@ -2703,7 +2703,7 @@ vint32m8_t test_vand_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vand_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m1_mu(
@@ -2712,7 +2712,7 @@ vint32m8_t test_vand_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m1_mu(
@@ -2721,7 +2721,7 @@ vint64m1_t test_vand_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vand_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m2_mu(
@@ -2730,7 +2730,7 @@ vint64m1_t test_vand_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m2_mu(
@@ -2739,7 +2739,7 @@ vint64m2_t test_vand_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vand_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m4_mu(
@@ -2748,7 +2748,7 @@ vint64m2_t test_vand_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m4_mu(
@@ -2757,7 +2757,7 @@ vint64m4_t test_vand_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vand_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m8_mu(
@@ -2766,7 +2766,7 @@ vint64m4_t test_vand_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m8_mu(
@@ -2775,7 +2775,7 @@ vint64m8_t test_vand_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vand_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_mu(
@@ -2784,7 +2784,7 @@ vint64m8_t test_vand_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_mu(
@@ -2793,7 +2793,7 @@ vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_mu(
@@ -2802,7 +2802,7 @@ vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_mu(
@@ -2811,7 +2811,7 @@ vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_mu(
@@ -2820,7 +2820,7 @@ vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_mu(
@@ -2829,7 +2829,7 @@ vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m1_mu(
@@ -2838,7 +2838,7 @@ vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m1_mu(
@@ -2847,7 +2847,7 @@ vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m2_mu(
@@ -2856,7 +2856,7 @@ vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m2_mu(
@@ -2865,7 +2865,7 @@ vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m4_mu(
@@ -2874,7 +2874,7 @@ vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m4_mu(
@@ -2883,7 +2883,7 @@ vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m8_mu(
@@ -2892,7 +2892,7 @@ vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m8_mu(
@@ -2901,7 +2901,7 @@ vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vand_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_mu(
@@ -2910,7 +2910,7 @@ vuint8m8_t test_vand_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_mu(
@@ -2919,7 +2919,7 @@ vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_mu(
@@ -2928,7 +2928,7 @@ vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_mu(
@@ -2937,7 +2937,7 @@ vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m1_mu(
@@ -2946,7 +2946,7 @@ vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m1_mu(
@@ -2955,7 +2955,7 @@ vuint16m1_t test_vand_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m2_mu(
@@ -2964,7 +2964,7 @@ vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m2_mu(
@@ -2973,7 +2973,7 @@ vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m4_mu(
@@ -2982,7 +2982,7 @@ vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m4_mu(
@@ -2991,7 +2991,7 @@ vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m8_mu(
@@ -3000,7 +3000,7 @@ vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m8_mu(
@@ -3009,7 +3009,7 @@ vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_mu(
@@ -3018,7 +3018,7 @@ vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_mu(
@@ -3027,7 +3027,7 @@ vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m1_mu(
@@ -3036,7 +3036,7 @@ vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m1_mu(
@@ -3045,7 +3045,7 @@ vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m2_mu(
@@ -3054,7 +3054,7 @@ vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m2_mu(
@@ -3063,7 +3063,7 @@ vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vand_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m4_mu(
@@ -3072,7 +3072,7 @@ vuint32m2_t test_vand_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m4_mu(
@@ -3081,7 +3081,7 @@ vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m8_mu(
@@ -3090,7 +3090,7 @@ vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m8_mu(
@@ -3099,7 +3099,7 @@ vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m1_mu(
@@ -3108,7 +3108,7 @@ vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m1_mu(
@@ -3117,7 +3117,7 @@ vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m2_mu(
@@ -3126,7 +3126,7 @@ vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m2_mu(
@@ -3135,7 +3135,7 @@ vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m4_mu(
@@ -3144,7 +3144,7 @@ vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m4_mu(
@@ -3153,7 +3153,7 @@ vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m8_mu(
@@ -3162,7 +3162,7 @@ vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m8_mu(
@@ -3171,6 +3171,6 @@ vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vand_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vand_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
}
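
(Aside, not part of the diff: since the hunks above are mechanical renames, a single usage sketch may save readers from scanning them all. The snippet below migrates one caller from the old overloaded spelling to the new `__riscv_`-prefixed one. The parameter types and argument order are copied verbatim from the test_vand_vv_u8m1_tumu case above; the function name bitwise_and_tumu is hypothetical, and the `_tumu` suffix selects the tail-undisturbed, mask-undisturbed policy.)

#include <riscv_vector.h>

// Hypothetical caller (the name is illustrative, not from the patch).
// Tail-undisturbed / mask-undisturbed bitwise AND: inactive and tail
// elements keep the values already present in `maskedoff`.
vuint8m1_t bitwise_and_tumu(vbool8_t mask, vuint8m1_t maskedoff,
                            vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  // Before this patch: return vand_tumu(mask, maskedoff, op1, op2, vl);
  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
}

Built for a vector-enabled target (e.g. clang --target=riscv64 -march=rv64gcv), the new spelling lowers to the same IR as the old one — only the source-level call changes, as the untouched CHECK-RV64 lines throughout this diff attest.
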
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasub.c
index 44900f849971..c5db0e0d61b4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vasub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vasub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vasub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vasub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vasub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vasub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vasub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vasub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vasub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vasub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vasub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasub_mu(mask, maskedoff, op1, op2, vl);
}
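
An illustrative sketch of the user-facing API after this rename (a minimal example, assuming the standard <riscv_vector.h> header; the function and variable names here are hypothetical, not from the commit): the overloaded mask-undisturbed form is now spelled with the __riscv_ prefix, and elements whose mask bit is 0 take their values from the maskedoff operand.

#include <riscv_vector.h>

// Hypothetical usage: masked averaging subtract, (a - b) >> 1 with
// rounding, where inactive elements keep the value from `dest`
// (the _mu, mask-undisturbed policy exercised by the tests above).
vint32m1_t masked_avg_sub(vbool32_t mask, vint32m1_t dest,
                          vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vasub_mu(mask, dest, a, b, vl);
}
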
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasubu.c
index ce64bb63e9a4..3880027532b4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasubu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vasubu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vasubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vasubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vasubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tu(maskedoff, op1, op2, vl);
}
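
A brief note on the policy suffixes exercised in this file (summarizing the rvv-intrinsic-doc naming scheme): _tu keeps tail elements from maskedoff in an unmasked operation; _tum is the masked form with the tail undisturbed; _tumu keeps both tail and inactive elements from maskedoff; _mu keeps inactive elements while the tail is agnostic. A minimal sketch, assuming <riscv_vector.h> (the function name is hypothetical):

#include <riscv_vector.h>

// Hypothetical usage: tail-undisturbed unsigned averaging subtract.
// Elements at indices >= vl keep the value from `dest` instead of
// becoming agnostic (the _tu policy tested above).
vuint8m1_t avg_sub_tu(vuint8m1_t dest, vuint8m1_t a, vuint8m1_t b,
                      size_t vl) {
  return __riscv_vasubu_tu(dest, a, b, vl);
}
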
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vasubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vasubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vasubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vasubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vasubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vasubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vasubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vasubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vasubu_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
index a5411907c01f..71db651f0403 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, vbool4_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, vbool2_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, vbool4_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_tu(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu(
@@ -157,7 +157,7 @@ vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_tu(
@@ -166,7 +166,7 @@ vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu(
@@ -175,7 +175,7 @@ vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu(
@@ -184,7 +184,7 @@ vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu(
@@ -193,7 +193,7 @@ vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu(
@@ -202,7 +202,7 @@ vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu(
@@ -211,7 +211,7 @@ vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu(
@@ -220,7 +220,7 @@ vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu(
@@ -229,7 +229,7 @@ vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu(
@@ -238,7 +238,7 @@ vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu(
@@ -247,7 +247,7 @@ vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vbool4_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu(
@@ -256,7 +256,7 @@ vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vbool2_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu(
@@ -265,7 +265,7 @@ vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu(
@@ -274,7 +274,7 @@ vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_tu(
@@ -301,7 +301,7 @@ vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vbool4_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu(
@@ -310,7 +310,7 @@ vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu(
@@ -319,7 +319,7 @@ vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu(
@@ -328,7 +328,7 @@ vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu(
@@ -337,7 +337,7 @@ vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu(
@@ -346,7 +346,7 @@ vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vboo
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_tu(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu(
@@ -382,7 +382,7 @@ vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool4_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu(
@@ -391,7 +391,7 @@ vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool2_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu(
@@ -400,7 +400,7 @@ vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool1_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu(
@@ -409,7 +409,7 @@ vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu(
@@ -436,7 +436,7 @@ vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu(
@@ -445,7 +445,7 @@ vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, vbool4_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu(
@@ -454,7 +454,7 @@ vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, vbool2_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu(
@@ -463,7 +463,7 @@ vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu(
@@ -481,7 +481,7 @@ vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu(
@@ -490,7 +490,7 @@ vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu(
@@ -499,7 +499,7 @@ vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, vbool4_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu(
@@ -508,7 +508,7 @@ vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, vbool64_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu(
@@ -517,7 +517,7 @@ vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, vbool32_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu(
@@ -526,7 +526,7 @@ vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, vbool16_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu(
@@ -535,6 +535,6 @@ vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, vbool8_t mask, size_t vl) {
- return vcompress_tu(maskedoff, src, mask, vl);
+ return __riscv_vcompress_tu(maskedoff, src, mask, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdiv.c
index dac3c3fe0dfb..1ccfa37e1ac0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdiv.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vdiv_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vdiv_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vdiv_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
}
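// The `_tum` group begins here. A brief orientation, inferred from the
// rvv-intrinsic-doc policy-suffix scheme rather than stated in this diff:
// unlike `_tu`, these variants take a leading `mask` operand; tail elements
// of `maskedoff` stay undisturbed, while masked-off elements are agnostic.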
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vdiv_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vdiv_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl);
}
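// The `_tumu` group begins here: the same shape as `_tum`, but per the
// policy-suffix scheme (again not spelled out in this diff) masked-off
// elements also keep their `maskedoff` values -- tail undisturbed and
// mask undisturbed.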
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vdiv_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vdiv_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl);
}
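// The final `_mu` group begins here: masked-off elements keep their
// `maskedoff` values while tail elements are left agnostic (mask
// undisturbed, tail agnostic -- per the policy-suffix scheme rather
// than anything shown in this diff).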
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vdiv_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vdiv_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vdiv_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdivu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdivu.c
index 8941d95f4f90..4fa791ad16f3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdivu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vdivu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vdivu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vdivu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vdivu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vdivu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vdivu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vdivu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vdivu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vdivu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vdivu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vdivu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vdivu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vdivu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vdivu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vdivu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vdivu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vdivu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
}
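The vdivu hunks above are purely mechanical: every overloaded call keeps its argument list, overload resolution is still driven by the operand types, and only the __riscv_ prefix is new. For context, a minimal sketch of caller code after this patch (an illustrative example, not part of the test suite; it assumes a toolchain with the V extension enabled and riscv_vector.h available):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Elementwise a[i] / b[i] under a mask, leaving inactive elements of
// dst unchanged ("mu" = mask-undisturbed policy). The overload picks
// the u32m1 variant from the operand types, as in the tests above.
void div_masked(uint32_t *dst, const uint32_t *a, const uint32_t *b,
                size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vuint32m1_t vd = __riscv_vle32_v_u32m1(dst + i, vl);
    vuint32m1_t va = __riscv_vle32_v_u32m1(a + i, vl);
    vuint32m1_t vb = __riscv_vle32_v_u32m1(b + i, vl);
    vbool32_t m = __riscv_vmsne_vx_u32m1_b32(vb, 0, vl); // mask off b[i] == 0
    vd = __riscv_vdivu_mu(m, vd, va, vb, vl);
    __riscv_vse32_v_u32m1(dst + i, vd, vl);
    i += vl;
  }
}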
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfabs.c
index 992b5ae6d24b..20959d1fef6a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfabs.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfabs_tu(maskedoff, op1, vl);
+ return __riscv_vfabs_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_tum(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_tum(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfabs_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m2_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfabs_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m4_tum(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m8_tum(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tum(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_tum(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_tum(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfabs_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_tumu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_tumu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m1_tumu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m2_tumu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m4_tumu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m8_tumu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_tumu(
@@ -355,7 +355,7 @@ vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_tumu(
@@ -364,7 +364,7 @@ vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_tumu(
@@ -373,7 +373,7 @@ vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_tumu(
@@ -382,7 +382,7 @@ vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_tumu(
@@ -391,7 +391,7 @@ vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_tumu(
@@ -400,7 +400,7 @@ vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_tumu(
@@ -409,7 +409,7 @@ vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfabs_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_mu(
@@ -418,7 +418,7 @@ vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_mu(
@@ -427,7 +427,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m1_mu(
@@ -436,7 +436,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m2_mu(
@@ -445,7 +445,7 @@ vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m4_mu(
@@ -454,7 +454,7 @@ vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f16m8_mu(
@@ -463,7 +463,7 @@ vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_mu(
@@ -472,7 +472,7 @@ vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_mu(
@@ -481,7 +481,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_mu(
@@ -490,7 +490,7 @@ vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_mu(
@@ -499,7 +499,7 @@ vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_mu(
@@ -508,7 +508,7 @@ vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_mu(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_mu(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfabs_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_mu(
@@ -535,7 +535,7 @@ vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_mu(
@@ -544,6 +544,6 @@ vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfabs_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
}
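
A minimal caller sketch (not part of this diff) showing what the rename above means for user code: the overloaded masked form of vfabs keeps its argument list and only gains the __riscv_ prefix. The wrapper name abs_f32m1_mu below is hypothetical; the intrinsic signature mirrors the f32m1 _mu test case above, assuming <riscv_vector.h> and an RVV-enabled toolchain.

    #include <riscv_vector.h>

    // Hypothetical wrapper: masked absolute value, mask-undisturbed (_mu).
    // Inactive elements keep their values from maskedoff; only the spelling
    // of the intrinsic changes with this patch.
    vfloat32m1_t abs_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
                              vfloat32m1_t op1, size_t vl) {
      return __riscv_vfabs_mu(mask, maskedoff, op1, vl);  // was: vfabs_mu(...)
    }
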
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
index bc79e8987e0f..ef88cbebc8cb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
}
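
The same pattern holds for vfadd: overload resolution picks the vv or vf form from the operand types, and the policy suffix (_tu, _tum, _tumu, _mu) is unchanged; only the __riscv_ prefix is new. A short hypothetical helper (not from the patch), mirroring the f64m1 _tu test case above:

    #include <riscv_vector.h>

    // Hypothetical helper: vector + scalar add, tail-undisturbed (_tu).
    // Tail elements at indices >= vl keep their values from maskedoff.
    vfloat64m1_t add_scalar_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
                                     double op2, size_t vl) {
      return __riscv_vfadd_tu(maskedoff, op1, op2, vl);  // was: vfadd_tu(...)
    }
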
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass.c
index 9eb2c575df0e..6d6158e348c8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_tu(
@@ -22,7 +22,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1_tu(
@@ -31,7 +31,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2_tu(
@@ -40,7 +40,7 @@ vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4_tu(
@@ -49,7 +49,7 @@ vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8_tu(
@@ -58,7 +58,7 @@ vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tu(
@@ -67,7 +67,7 @@ vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_tu(
@@ -76,7 +76,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_tu(
@@ -85,7 +85,7 @@ vuint32m1_t test_vfclass_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_tu(
@@ -94,7 +94,7 @@ vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_tu(
@@ -103,7 +103,7 @@ vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_tu(
@@ -112,7 +112,7 @@ vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_tu(
@@ -121,7 +121,7 @@ vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_tu(
@@ -130,7 +130,7 @@ vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_tu(
@@ -139,7 +139,7 @@ vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfclass_tu(maskedoff, op1, vl);
+ return __riscv_vfclass_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_tum(
@@ -148,7 +148,7 @@ vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_tum(
@@ -157,7 +157,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1_tum(
@@ -166,7 +166,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2_tum(
@@ -175,7 +175,7 @@ vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4_tum(
@@ -184,7 +184,7 @@ vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8_tum(
@@ -193,7 +193,7 @@ vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tum(
@@ -202,7 +202,7 @@ vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_tum(
@@ -211,7 +211,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_tum(
@@ -220,7 +220,7 @@ vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_tum(
@@ -229,7 +229,7 @@ vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_tum(
@@ -238,7 +238,7 @@ vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_tum(
@@ -247,7 +247,7 @@ vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_tum(
@@ -256,7 +256,7 @@ vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_tum(
@@ -265,7 +265,7 @@ vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_tum(
@@ -274,7 +274,7 @@ vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfclass_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_tumu(
@@ -283,7 +283,7 @@ vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_tumu(
@@ -292,7 +292,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1_tumu(
@@ -301,7 +301,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2_tumu(
@@ -310,7 +310,7 @@ vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4_tumu(
@@ -319,7 +319,7 @@ vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8_tumu(
@@ -328,7 +328,7 @@ vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tumu(
@@ -337,7 +337,7 @@ vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_tumu(
@@ -346,7 +346,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_tumu(
@@ -355,7 +355,7 @@ vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_tumu(
@@ -364,7 +364,7 @@ vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_tumu(
@@ -373,7 +373,7 @@ vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_tumu(
@@ -382,7 +382,7 @@ vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_tumu(
@@ -391,7 +391,7 @@ vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_tumu(
@@ -400,7 +400,7 @@ vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_tumu(
@@ -409,7 +409,7 @@ vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfclass_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_mu(
@@ -418,7 +418,7 @@ vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_mu(
@@ -427,7 +427,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1_mu(
@@ -436,7 +436,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2_mu(
@@ -445,7 +445,7 @@ vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4_mu(
@@ -454,7 +454,7 @@ vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8_mu(
@@ -463,7 +463,7 @@ vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_mu(
@@ -472,7 +472,7 @@ vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_mu(
@@ -481,7 +481,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_mu(
@@ -490,7 +490,7 @@ vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_mu(
@@ -499,7 +499,7 @@ vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_mu(
@@ -508,7 +508,7 @@ vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_mu(
@@ -517,7 +517,7 @@ vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_mu(
@@ -526,7 +526,7 @@ vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_mu(
@@ -535,7 +535,7 @@ vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_mu(
@@ -544,6 +544,6 @@ vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfclass_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfclass_mu(mask, maskedoff, op1, vl);
}
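
As a rough illustration of what this rename means for user code (a minimal sketch, not part of the test suite; the function name classify_keep_tail and its body are hypothetical), the overloaded policy intrinsics keep their argument lists unchanged and only gain the __riscv_ prefix, so migration is a pure textual rename. In the _tu (tail-undisturbed) variant exercised above, the previous destination value is passed as maskedoff and elements past vl are left unchanged:

#include <riscv_vector.h>

// Hypothetical example: classify a float vector while preserving the
// tail elements of `dest` beyond the active vector length `vl`.
vuint32m1_t classify_keep_tail(vuint32m1_t dest, vfloat32m1_t src, size_t vl) {
  // Before this patch:  return vfclass_tu(dest, src, vl);
  // After this patch:
  return __riscv_vfclass_tu(dest, src, vl);
}

The _tum, _tumu, and _mu variants tested above follow the same pattern, with an extra leading mask operand.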
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt.c
index 796cfe27c85e..f7da057866b5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tu(
@@ -22,7 +22,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tu(
@@ -31,7 +31,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tu(
@@ -40,7 +40,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tu(
@@ -49,7 +49,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tu(
@@ -58,7 +58,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tu(
@@ -67,7 +67,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tu(
@@ -76,7 +76,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tu(
@@ -85,7 +85,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tu(
@@ -94,7 +94,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tu(
@@ -103,7 +103,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tu(
@@ -112,7 +112,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tu(
@@ -121,7 +121,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tu(
@@ -130,7 +130,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tu(
@@ -139,7 +139,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tu(
@@ -148,7 +148,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tu(
@@ -157,7 +157,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tu(
@@ -166,7 +166,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tu(
@@ -175,7 +175,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tu(
@@ -184,7 +184,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tu(
@@ -193,7 +193,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tu(
@@ -202,7 +202,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tu(
@@ -211,7 +211,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tu(
@@ -220,7 +220,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tu(
@@ -229,7 +229,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_tu(
@@ -238,7 +238,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint16mf4_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_tu(
@@ -247,7 +247,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint16mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_tu(
@@ -256,7 +256,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint16m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_tu(
@@ -265,7 +265,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint16m2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_tu(
@@ -274,7 +274,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint16m4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_tu(
@@ -283,7 +283,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint16m8_t src, s
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_tu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_tu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_tu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint16m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_tu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint16m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_tu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint16m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint16m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tu(
@@ -346,7 +346,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tu(
@@ -355,7 +355,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tu(
@@ -364,7 +364,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tu(
@@ -373,7 +373,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tu(
@@ -382,7 +382,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tu(
@@ -391,7 +391,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tu(
@@ -400,7 +400,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tu(
@@ -409,7 +409,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tu(
@@ -418,7 +418,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tu(
@@ -427,7 +427,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tu(
@@ -436,7 +436,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tu(
@@ -445,7 +445,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tu(
@@ -454,7 +454,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tu(
@@ -463,7 +463,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tu(
@@ -472,7 +472,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tu(
@@ -481,7 +481,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tu(
@@ -490,7 +490,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tu(
@@ -499,7 +499,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tu(
@@ -508,7 +508,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tu(
@@ -517,7 +517,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_tu(
@@ -526,7 +526,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint32mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_tu(
@@ -535,7 +535,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint32m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_tu(
@@ -544,7 +544,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint32m2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_tu(
@@ -553,7 +553,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint32m4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tu(
@@ -562,7 +562,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint32m8_t src, s
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_tu(
@@ -571,7 +571,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_tu(
@@ -580,7 +580,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint32m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_tu(
@@ -589,7 +589,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint32m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_tu(
@@ -598,7 +598,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint32m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_tu(
@@ -607,7 +607,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint32m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tu(
@@ -616,7 +616,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tu(
@@ -625,7 +625,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tu(
@@ -634,7 +634,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tu(
@@ -643,7 +643,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tu(
@@ -652,7 +652,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tu(
@@ -661,7 +661,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tu(
@@ -670,7 +670,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tu(
@@ -679,7 +679,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tu(
@@ -688,7 +688,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tu(
@@ -697,7 +697,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tu(
@@ -706,7 +706,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tu(
@@ -715,7 +715,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tu(
@@ -724,7 +724,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tu(
@@ -733,7 +733,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tu(
@@ -742,7 +742,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tu(
@@ -751,7 +751,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_tu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint64m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_tu(
@@ -769,7 +769,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint64m2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_tu(
@@ -778,7 +778,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint64m4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_tu(
@@ -787,7 +787,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint64m8_t src, s
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_tu(
@@ -796,7 +796,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint64m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_tu(
@@ -805,7 +805,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint64m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_tu(
@@ -814,7 +814,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint64m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
- return vfcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_tum(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint64m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tum(
@@ -832,7 +832,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tum(
@@ -841,7 +841,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tum(
@@ -850,7 +850,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tum(
@@ -859,7 +859,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tum(
@@ -868,7 +868,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tum(
@@ -877,7 +877,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tum(
@@ -886,7 +886,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tum(
@@ -895,7 +895,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tum(
@@ -904,7 +904,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tum(
@@ -913,7 +913,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tum(
@@ -922,7 +922,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tum(
@@ -931,7 +931,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tum(
@@ -940,7 +940,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tum(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tum(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tum(
@@ -967,7 +967,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tum(
@@ -976,7 +976,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tum(
@@ -985,7 +985,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tum(
@@ -994,7 +994,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tum(
@@ -1003,7 +1003,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tum(
@@ -1012,7 +1012,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tum(
@@ -1021,7 +1021,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tum(
@@ -1030,7 +1030,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tum(
@@ -1039,7 +1039,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_tum(
@@ -1048,7 +1048,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_tum(
@@ -1057,7 +1057,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_tum(
@@ -1066,7 +1066,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_tum(
@@ -1075,7 +1075,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_tum(
@@ -1084,7 +1084,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_tum(
@@ -1093,7 +1093,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_tum(
@@ -1102,7 +1102,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_tum(
@@ -1111,7 +1111,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_tum(
@@ -1120,7 +1120,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_tum(
@@ -1129,7 +1129,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_tum(
@@ -1138,7 +1138,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tum(
@@ -1147,7 +1147,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tum(
@@ -1156,7 +1156,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tum(
@@ -1165,7 +1165,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tum(
@@ -1174,7 +1174,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tum(
@@ -1183,7 +1183,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tum(
@@ -1192,7 +1192,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tum(
@@ -1201,7 +1201,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tum(
@@ -1210,7 +1210,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tum(
@@ -1219,7 +1219,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tum(
@@ -1228,7 +1228,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tum(
@@ -1237,7 +1237,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tum(
@@ -1246,7 +1246,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tum(
@@ -1255,7 +1255,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tum(
@@ -1264,7 +1264,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tum(
@@ -1273,7 +1273,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tum(
@@ -1282,7 +1282,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tum(
@@ -1291,7 +1291,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tum(
@@ -1300,7 +1300,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tum(
@@ -1309,7 +1309,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tum(
@@ -1318,7 +1318,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tum(
@@ -1327,7 +1327,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_tum(
@@ -1336,7 +1336,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_tum(
@@ -1345,7 +1345,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_tum(
@@ -1354,7 +1354,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_tum(
@@ -1363,7 +1363,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tum(
@@ -1372,7 +1372,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_tum(
@@ -1381,7 +1381,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_tum(
@@ -1390,7 +1390,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_tum(
@@ -1399,7 +1399,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_tum(
@@ -1408,7 +1408,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_tum(
@@ -1417,7 +1417,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tum(
@@ -1426,7 +1426,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tum(
@@ -1435,7 +1435,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tum(
@@ -1444,7 +1444,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tum(
@@ -1453,7 +1453,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tum(
@@ -1462,7 +1462,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tum(
@@ -1471,7 +1471,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tum(
@@ -1480,7 +1480,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tum(
@@ -1489,7 +1489,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tum(
@@ -1498,7 +1498,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tum(
@@ -1507,7 +1507,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tum(
@@ -1516,7 +1516,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tum(
@@ -1525,7 +1525,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tum(
@@ -1534,7 +1534,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tum(
@@ -1543,7 +1543,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tum(
@@ -1552,7 +1552,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tum(
@@ -1561,7 +1561,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_tum(
@@ -1570,7 +1570,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_tum(
@@ -1579,7 +1579,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_tum(
@@ -1588,7 +1588,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_tum(
@@ -1597,7 +1597,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_tum(
@@ -1606,7 +1606,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_tum(
@@ -1615,7 +1615,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_tum(
@@ -1624,7 +1624,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
- return vfcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_tumu(
@@ -1633,7 +1633,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tumu(
@@ -1642,7 +1642,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tumu(
@@ -1651,7 +1651,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tumu(
@@ -1660,7 +1660,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tumu(
@@ -1669,7 +1669,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tumu(
@@ -1678,7 +1678,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tumu(
@@ -1687,7 +1687,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tumu(
@@ -1696,7 +1696,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tumu(
@@ -1705,7 +1705,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tumu(
@@ -1714,7 +1714,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tumu(
@@ -1723,7 +1723,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tumu(
@@ -1732,7 +1732,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tumu(
@@ -1741,7 +1741,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tumu(
@@ -1750,7 +1750,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tumu(
@@ -1759,7 +1759,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tumu(
@@ -1768,7 +1768,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tumu(
@@ -1777,7 +1777,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tumu(
@@ -1786,7 +1786,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tumu(
@@ -1795,7 +1795,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tumu(
@@ -1804,7 +1804,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tumu(
@@ -1813,7 +1813,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tumu(
@@ -1822,7 +1822,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tumu(
@@ -1831,7 +1831,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tumu(
@@ -1840,7 +1840,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tumu(
@@ -1849,7 +1849,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_tumu(
@@ -1858,7 +1858,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_tumu(
@@ -1867,7 +1867,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_tumu(
@@ -1876,7 +1876,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_tumu(
@@ -1885,7 +1885,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_tumu(
@@ -1894,7 +1894,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_tumu(
@@ -1903,7 +1903,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_tumu(
@@ -1912,7 +1912,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_tumu(
@@ -1921,7 +1921,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_tumu(
@@ -1930,7 +1930,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_tumu(
@@ -1939,7 +1939,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_tumu(
@@ -1948,7 +1948,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tumu(
@@ -1957,7 +1957,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tumu(
@@ -1966,7 +1966,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tumu(
@@ -1975,7 +1975,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tumu(
@@ -1984,7 +1984,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tumu(
@@ -1993,7 +1993,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tumu(
@@ -2002,7 +2002,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tumu(
@@ -2011,7 +2011,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tumu(
@@ -2020,7 +2020,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tumu(
@@ -2029,7 +2029,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tumu(
@@ -2038,7 +2038,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tumu(
@@ -2047,7 +2047,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tumu(
@@ -2056,7 +2056,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tumu(
@@ -2065,7 +2065,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tumu(
@@ -2074,7 +2074,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tumu(
@@ -2083,7 +2083,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tumu(
@@ -2092,7 +2092,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tumu(
@@ -2101,7 +2101,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tumu(
@@ -2110,7 +2110,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tumu(
@@ -2119,7 +2119,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tumu(
@@ -2128,7 +2128,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tumu(
@@ -2137,7 +2137,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_tumu(
@@ -2146,7 +2146,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_tumu(
@@ -2155,7 +2155,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_tumu(
@@ -2164,7 +2164,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_tumu(
@@ -2173,7 +2173,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tumu(
@@ -2182,7 +2182,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_tumu(
@@ -2191,7 +2191,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_tumu(
@@ -2200,7 +2200,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_tumu(
@@ -2209,7 +2209,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_tumu(
@@ -2218,7 +2218,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_tumu(
@@ -2227,7 +2227,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tumu(
@@ -2236,7 +2236,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tumu(
@@ -2245,7 +2245,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tumu(
@@ -2254,7 +2254,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tumu(
@@ -2263,7 +2263,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tumu(
@@ -2272,7 +2272,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tumu(
@@ -2281,7 +2281,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tumu(
@@ -2290,7 +2290,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tumu(
@@ -2299,7 +2299,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tumu(
@@ -2308,7 +2308,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tumu(
@@ -2317,7 +2317,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tumu(
@@ -2326,7 +2326,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tumu(
@@ -2335,7 +2335,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tumu(
@@ -2344,7 +2344,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tumu(
@@ -2353,7 +2353,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tumu(
@@ -2362,7 +2362,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tumu(
@@ -2371,7 +2371,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_tumu(
@@ -2380,7 +2380,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_tumu(
@@ -2389,7 +2389,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_tumu(
@@ -2398,7 +2398,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_tumu(
@@ -2407,7 +2407,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_tumu(
@@ -2416,7 +2416,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_tumu(
@@ -2425,7 +2425,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_tumu(
@@ -2434,7 +2434,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
- return vfcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_mu(
@@ -2443,7 +2443,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_mu(
@@ -2452,7 +2452,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_mu(
@@ -2461,7 +2461,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_mu(
@@ -2470,7 +2470,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_mu(
@@ -2479,7 +2479,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_mu(
@@ -2488,7 +2488,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_mu(
@@ -2497,7 +2497,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_mu(
@@ -2506,7 +2506,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_mu(
@@ -2515,7 +2515,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_mu(
@@ -2524,7 +2524,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_mu(
@@ -2533,7 +2533,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_mu(
@@ -2542,7 +2542,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_mu(
@@ -2551,7 +2551,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_mu(
@@ -2560,7 +2560,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_mu(
@@ -2569,7 +2569,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_mu(
@@ -2578,7 +2578,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_mu(
@@ -2587,7 +2587,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_mu(
@@ -2596,7 +2596,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_mu(
@@ -2605,7 +2605,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_mu(
@@ -2614,7 +2614,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_mu(
@@ -2623,7 +2623,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_mu(
@@ -2632,7 +2632,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_mu(
@@ -2641,7 +2641,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_mu(
@@ -2650,7 +2650,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_mu(
@@ -2659,7 +2659,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_mu(
@@ -2668,7 +2668,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_mu(
@@ -2677,7 +2677,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_mu(
@@ -2686,7 +2686,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_mu(
@@ -2695,7 +2695,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_mu(
@@ -2704,7 +2704,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_mu(
@@ -2713,7 +2713,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_mu(
@@ -2722,7 +2722,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_mu(
@@ -2731,7 +2731,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_mu(
@@ -2740,7 +2740,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_mu(
@@ -2749,7 +2749,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_mu(
@@ -2758,7 +2758,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_mu(
@@ -2767,7 +2767,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_mu(
@@ -2776,7 +2776,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_mu(
@@ -2785,7 +2785,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_mu(
@@ -2794,7 +2794,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_mu(
@@ -2803,7 +2803,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_mu(
@@ -2812,7 +2812,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_mu(
@@ -2821,7 +2821,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_mu(
@@ -2830,7 +2830,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_mu(
@@ -2839,7 +2839,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_mu(
@@ -2848,7 +2848,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_mu(
@@ -2857,7 +2857,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_mu(
@@ -2866,7 +2866,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_mu(
@@ -2875,7 +2875,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_mu(
@@ -2884,7 +2884,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_mu(
@@ -2893,7 +2893,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_mu(
@@ -2902,7 +2902,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_mu(
@@ -2911,7 +2911,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_mu(
@@ -2920,7 +2920,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_mu(
@@ -2929,7 +2929,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_mu(
@@ -2938,7 +2938,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_mu(
@@ -2947,7 +2947,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_mu(
@@ -2956,7 +2956,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_mu(
@@ -2965,7 +2965,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_mu(
@@ -2974,7 +2974,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_mu(
@@ -2983,7 +2983,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_mu(
@@ -2992,7 +2992,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_mu(
@@ -3001,7 +3001,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_mu(
@@ -3010,7 +3010,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_mu(
@@ -3019,7 +3019,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_mu(
@@ -3028,7 +3028,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_mu(
@@ -3037,7 +3037,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_mu(
@@ -3046,7 +3046,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_mu(
@@ -3055,7 +3055,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_mu(
@@ -3064,7 +3064,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_mu(
@@ -3073,7 +3073,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_mu(
@@ -3082,7 +3082,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_mu(
@@ -3091,7 +3091,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_mu(
@@ -3100,7 +3100,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_mu(
@@ -3109,7 +3109,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_mu(
@@ -3118,7 +3118,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_mu(
@@ -3127,7 +3127,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_mu(
@@ -3136,7 +3136,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_mu(
@@ -3145,7 +3145,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_mu(
@@ -3154,7 +3154,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_mu(
@@ -3163,7 +3163,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_mu(
@@ -3172,7 +3172,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_mu(
@@ -3181,7 +3181,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_mu(
@@ -3190,7 +3190,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_mu(
@@ -3199,7 +3199,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_mu(
@@ -3208,7 +3208,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_mu(
@@ -3217,7 +3217,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_mu(
@@ -3226,7 +3226,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_mu(
@@ -3235,7 +3235,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_mu(
@@ -3244,6 +3244,6 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
- return vfcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
}
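For reference, a minimal sketch of how one of these renamed masked-conversion overloads reads in user code after this patch. It assumes a toolchain carrying this change with the V extension enabled; the wrapper name `convert_mu` is illustrative and not part of the test suite. The signature matches the u64m1 variant exercised above.

#include <riscv_vector.h>

// Mask-undisturbed (_mu): elements where `mask` is clear keep the
// corresponding values from `maskedoff`. The `_rtz_` spelling selects
// round-toward-zero instead of the dynamic rounding mode.
vuint64m1_t convert_mu(vbool64_t mask, vuint64m1_t maskedoff,
                       vfloat64m1_t src, size_t vl) {
  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}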
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfdiv.c
index 38be6a32ca09..c7277ef389e3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfdiv.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
}
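Likewise, a minimal sketch of the renamed overloaded vfdiv in ordinary code, under the same toolchain assumption; the wrapper name `scale_tu` is illustrative. The call matches the f32m1 tail-undisturbed variant tested above.

#include <riscv_vector.h>

// Tail-undisturbed (_tu): elements past `vl` keep the values from the
// first operand. Overload resolution picks the vector-vector form of
// vfdiv from the operand types.
vfloat32m1_t scale_tu(vfloat32m1_t maskedoff, vfloat32m1_t num,
                      vfloat32m1_t den, size_t vl) {
  return __riscv_vfdiv_tu(maskedoff, num, den, vl);
}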
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc.c
index ceb0273891bc..6ff8675eaa83 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
}
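The _tu hunks above and the _tum/_tumu/_mu hunks below follow the policy-suffix naming from riscv-non-isa/rvv-intrinsic-doc: _tu keeps tail elements undisturbed, _tum and _tumu add a mask (tail undisturbed, with masked-off elements agnostic or undisturbed respectively), and _mu keeps only masked-off elements undisturbed. A hedged usage sketch for the masked form renamed below (hypothetical helper, assuming <riscv_vector.h>):

// vd doubles as the accumulator and the source of undisturbed elements:
// where mask is set, result = vs1 * vs2 + vd; elsewhere per the policy.
vfloat32m1_t fma_masked(vbool32_t mask, vfloat32m1_t vd,
                        vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
  return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
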
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
}
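The vfmadd.c changes that follow apply the same rename; as background from the RVV spec rather than from this patch, vfmacc computes vd = vs1 * vs2 + vd while vfmadd multiplies the destination, vd = vs1 * vd + vs2. A minimal sketch of the renamed tail-undisturbed overload (hypothetical helper, assuming <riscv_vector.h>):

vfloat64m1_t madd_tu(vfloat64m1_t vd, vfloat64m1_t vs1,
                     vfloat64m1_t vs2, size_t vl) {
  // Tail elements past vl keep their old vd values (_tu policy).
  return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
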
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd.c
index 6a3ae6abb74c..71d62de68efa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
}
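The hunks above and below apply the same mechanical rewrite: every overloaded RVV intrinsic call gains the `__riscv_` prefix while its argument list stays unchanged. As a rough sketch of what the migration looks like in user code (assuming the standard riscv_vector.h header; the wrapper function name is illustrative and not part of this patch):

#include <riscv_vector.h>

/* Old overloaded spelling, replaced by this patch-set:
 *   return vfmadd_tu(vd, vs1, vs2, vl);
 * New prefixed spelling, as exercised by the tests above: */
vfloat32m1_t fused_madd_tu(vfloat32m1_t vd, vfloat32m1_t vs1,
                           vfloat32m1_t vs2, size_t vl) {
  /* tail-undisturbed policy: vd[i] = vd[i] * vs1[i] + vs2[i] for i < vl,
   * tail elements keep their values from vd */
  return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
}
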
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax.c
index bf8c13d44a5c..29e228c3a918 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
}
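
(For reference, a minimal caller-side sketch of the renamed overloaded vfmax API; the file name, function name, and clang invocation below are illustrative assumptions, not part of the patch.)

    // fmax_tu_example.c -- assumed name; build sketch:
    //   clang --target=riscv64 -march=rv64gcv -O2 -c fmax_tu_example.c
    #include <riscv_vector.h>

    // Tail-undisturbed vector fmax over doubles. The overload resolves on
    // the operand types, so no type suffix is spelled at the call site;
    // elements past vl keep their values from maskedoff (tu policy).
    vfloat64m1_t fmax_tu_example(vfloat64m1_t maskedoff, vfloat64m1_t a,
                                 vfloat64m1_t b, size_t vl) {
      return __riscv_vfmax_tu(maskedoff, a, b, vl);
    }
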
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
index b39ba3496939..751d371806b5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_tu(
@@ -139,6 +139,6 @@ vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
- return vfmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
}
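
(Likewise, a hedged sketch of the prefixed vfmerge overload; the function name is hypothetical. Unlike the masked vfmax forms above, vfmerge takes the mask after the scalar operand.)

    #include <riscv_vector.h>

    // Selects the scalar op2 where mask is set and op1 elsewhere; tail
    // elements come from maskedoff under the tu policy.
    vfloat64m1_t merge_tu_example(vfloat64m1_t maskedoff, vfloat64m1_t op1,
                                  double op2, vbool64_t mask, size_t vl) {
      return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
    }
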
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin.c
index 3ffdfc461480..f1c337742e4c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmin_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmin_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
}
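The hunks above all apply the same mechanical rename to the overloaded tail-undisturbed (`_tu`), masked (`_tum`, `_tumu`), and mask-undisturbed (`_mu`) variants. A minimal sketch of what this migration looks like at a user call site, assuming `<riscv_vector.h>` and a V-enabled target (e.g. `-march=rv64gcv`); the wrapper name `clamp_min_tu` is hypothetical, but the intrinsic signature matches the f32m1 test case in the diff above:

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical user helper: element-wise minimum with tail-undisturbed
// policy, using the overloaded intrinsic form.
vfloat32m1_t clamp_min_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
                          vfloat32m1_t op2, size_t vl) {
  // Before this patch series the call was spelled: vfmin_tu(maskedoff, op1, op2, vl);
  // After it, the overloaded intrinsic carries the __riscv_ prefix:
  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
}

Only the spelling changes; argument order, overload resolution on the operand types, and the trailing vl operand are unaffected.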
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac.c
index f8e9ca9f0574..962289301cc1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
}
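The pattern in these autogenerated tests is uniform: each overloaded policy intrinsic keeps its argument list and only gains the `__riscv_` prefix. A minimal caller sketch of the rename (hypothetical, not part of the test suite; assumes <riscv_vector.h> and a vector-enabled target such as rv64gcv_zvfh):

#include <riscv_vector.h>

// Hypothetical migration example: the tail-undisturbed, masked
// multiply-subtract-accumulate keeps its signature; only the
// __riscv_ prefix on the overloaded name is new.
vfloat32m1_t msac_caller(vbool32_t mask, vfloat32m1_t vd,
                         vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
  // Before this patch: return vfmsac_tum(mask, vd, vs1, vs2, vl);
  return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
}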
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub.c
index bee4b4daaf77..744a77a431f3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
}
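//
// ---------------------------------------------------------------------------
// Illustrative note (not part of the generated tests): a minimal sketch of
// how the renamed overloaded intrinsics are spelled from user code, assuming
// <riscv_vector.h> and a toolchain carrying this patch series. The function
// name fmsub_f32 and the loop are hypothetical; only the intrinsic spellings
// come from the patch.
//
// #include <riscv_vector.h>
// #include <stddef.h>
//
// // dst[i] = a[i] * b[i] - c[i], strip-mined by vl. Loads keep their
// // explicit-type names (C cannot overload on return type); the store and
// // vfmsub use the overloaded, type-inferred __riscv_-prefixed forms.
// void fmsub_f32(float *dst, const float *a, const float *b,
//                const float *c, size_t n) {
//   for (size_t i = 0; i < n;) {
//     size_t vl = __riscv_vsetvl_e32m1(n - i);
//     vfloat32m1_t va = __riscv_vle32_v_f32m1(a + i, vl);
//     vfloat32m1_t vb = __riscv_vle32_v_f32m1(b + i, vl);
//     vfloat32m1_t vc = __riscv_vle32_v_f32m1(c + i, vl);
//     va = __riscv_vfmsub(va, vb, vc, vl); // va = va * vb - vc
//     __riscv_vse32(dst + i, va, vl);
//     i += vl;
//   }
// }
// ---------------------------------------------------------------------------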
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul.c
index 249995e1c877..8a672b2597de 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmul_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfmul_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
}
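
Aside (not part of this patch): a minimal sketch of what the rename means at a call site. The overloaded policy intrinsics still resolve on argument types, so user code only gains the __riscv_ prefix. The helper names below are hypothetical; the sketch assumes <riscv_vector.h> and a toolchain with the V extension enabled.

#include <riscv_vector.h>

// Hypothetical helper: masked multiply under the tail-undisturbed,
// mask-undisturbed (tumu) policy; tail and inactive elements keep the
// values from maskedoff.
static vfloat32m1_t scale_active_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
                                      vfloat32m1_t v, float s, size_t vl) {
  return __riscv_vfmul_tumu(mask, maskedoff, v, s, vl);  // was vfmul_tumu(...)
}

// Hypothetical helper: the mask-undisturbed (mu) policy; inactive elements
// come from maskedoff while the tail is agnostic.
static vfloat32m1_t mul_mu(vbool32_t mask, vfloat32m1_t maskedoff,
                           vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return __riscv_vfmul_mu(mask, maskedoff, a, b, vl);  // was vfmul_mu(...)
}
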
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv.c
index 47a140d85418..36c4eb0cb1bf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t maskedoff, _Float16 src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t maskedoff, _Float16 src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmv_v_f_f16m1_tu(vfloat16m1_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfmv_v_f_f16m1_tu(vfloat16m1_t maskedoff, _Float16 src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmv_v_f_f16m2_tu(vfloat16m2_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfmv_v_f_f16m2_tu(vfloat16m2_t maskedoff, _Float16 src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmv_v_f_f16m4_tu(vfloat16m4_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfmv_v_f_f16m4_tu(vfloat16m4_t maskedoff, _Float16 src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmv_v_f_f16m8_tu(vfloat16m8_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfmv_v_f_f16m8_tu(vfloat16m8_t maskedoff, _Float16 src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmv_v_f_f32mf2_tu(vfloat32mf2_t maskedoff, float src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfmv_v_f_f32mf2_tu(vfloat32mf2_t maskedoff, float src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmv_v_f_f32m1_tu(vfloat32m1_t maskedoff, float src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfmv_v_f_f32m1_tu(vfloat32m1_t maskedoff, float src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmv_v_f_f32m2_tu(vfloat32m2_t maskedoff, float src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfmv_v_f_f32m2_tu(vfloat32m2_t maskedoff, float src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmv_v_f_f32m4_tu(vfloat32m4_t maskedoff, float src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfmv_v_f_f32m4_tu(vfloat32m4_t maskedoff, float src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmv_v_f_f32m8_tu(vfloat32m8_t maskedoff, float src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfmv_v_f_f32m8_tu(vfloat32m8_t maskedoff, float src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmv_v_f_f64m1_tu(vfloat64m1_t maskedoff, double src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfmv_v_f_f64m1_tu(vfloat64m1_t maskedoff, double src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmv_v_f_f64m2_tu(vfloat64m2_t maskedoff, double src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfmv_v_f_f64m2_tu(vfloat64m2_t maskedoff, double src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmv_v_f_f64m4_tu(vfloat64m4_t maskedoff, double src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfmv_v_f_f64m4_tu(vfloat64m4_t maskedoff, double src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmv_v_f_f64m8_tu(vfloat64m8_t maskedoff, double src, size_t vl) {
- return vfmv_v_tu(maskedoff, src, vl);
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4_tu(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfmv_v_f_f64m8_tu(vfloat64m8_t maskedoff, double src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2_tu(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t maskedoff, _Float16 src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1_tu(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t maskedoff, _Float16 src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmv_s_f_f16m1_tu(vfloat16m1_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2_tu(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfmv_s_f_f16m1_tu(vfloat16m1_t maskedoff, _Float16 src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmv_s_f_f16m2_tu(vfloat16m2_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4_tu(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfmv_s_f_f16m2_tu(vfloat16m2_t maskedoff, _Float16 src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmv_s_f_f16m4_tu(vfloat16m4_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8_tu(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfmv_s_f_f16m4_tu(vfloat16m4_t maskedoff, _Float16 src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmv_s_f_f16m8_tu(vfloat16m8_t maskedoff, _Float16 src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2_tu(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfmv_s_f_f16m8_tu(vfloat16m8_t maskedoff, _Float16 src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmv_s_f_f32mf2_tu(vfloat32mf2_t maskedoff, float src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1_tu(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfmv_s_f_f32mf2_tu(vfloat32mf2_t maskedoff, float src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmv_s_f_f32m1_tu(vfloat32m1_t maskedoff, float src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2_tu(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfmv_s_f_f32m1_tu(vfloat32m1_t maskedoff, float src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmv_s_f_f32m2_tu(vfloat32m2_t maskedoff, float src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4_tu(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfmv_s_f_f32m2_tu(vfloat32m2_t maskedoff, float src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmv_s_f_f32m4_tu(vfloat32m4_t maskedoff, float src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8_tu(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfmv_s_f_f32m4_tu(vfloat32m4_t maskedoff, float src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmv_s_f_f32m8_tu(vfloat32m8_t maskedoff, float src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1_tu(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfmv_s_f_f32m8_tu(vfloat32m8_t maskedoff, float src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmv_s_f_f64m1_tu(vfloat64m1_t maskedoff, double src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2_tu(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfmv_s_f_f64m1_tu(vfloat64m1_t maskedoff, double src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmv_s_f_f64m2_tu(vfloat64m2_t maskedoff, double src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4_tu(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfmv_s_f_f64m2_tu(vfloat64m2_t maskedoff, double src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmv_s_f_f64m4_tu(vfloat64m4_t maskedoff, double src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8_tu(
@@ -274,6 +274,6 @@ vfloat64m4_t test_vfmv_s_f_f64m4_tu(vfloat64m4_t maskedoff, double src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmv_s_f_f64m8_tu(vfloat64m8_t maskedoff, double src, size_t vl) {
- return vfmv_s_tu(maskedoff, src, vl);
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
}
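
Aside (not part of this patch): the vfmv moves above follow the same prefix-only rename. A short sketch with hypothetical helper names, again assuming <riscv_vector.h>:

#include <riscv_vector.h>

// Hypothetical helper: broadcast a scalar to elements 0..vl-1 under the
// tail-undisturbed (tu) policy; elements past vl keep the maskedoff values.
static vfloat64m1_t splat_tu(vfloat64m1_t maskedoff, double x, size_t vl) {
  return __riscv_vfmv_v_tu(maskedoff, x, vl);  // was vfmv_v_tu(...)
}

// Hypothetical helper: write a scalar into element 0 only; under tu the
// remaining elements are treated as tail and keep the maskedoff values.
static vfloat64m1_t set_elem0_tu(vfloat64m1_t maskedoff, double x, size_t vl) {
  return __riscv_vfmv_s_tu(maskedoff, x, vl);  // was vfmv_s_tu(...)
}
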
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt.c
index cb02ae00727a..292acfde384d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tu(
@@ -22,7 +22,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tu(
@@ -31,7 +31,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tu(
@@ -40,7 +40,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tu(
@@ -49,7 +49,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tu(
@@ -58,7 +58,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tu(
@@ -67,7 +67,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tu(
@@ -76,7 +76,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tu(
@@ -85,7 +85,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tu(
@@ -94,7 +94,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tu(
@@ -103,7 +103,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tu(
@@ -112,7 +112,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tu(
@@ -121,7 +121,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tu(
@@ -130,7 +130,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tu(
@@ -139,7 +139,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tu(
@@ -148,7 +148,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tu(
@@ -157,7 +157,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tu(
@@ -166,7 +166,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tu(
@@ -175,7 +175,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tu(
@@ -184,7 +184,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tu(
@@ -193,7 +193,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tu(
@@ -202,7 +202,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tu(
@@ -211,7 +211,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tu(
@@ -220,7 +220,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tu(
@@ -229,7 +229,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tu(
@@ -238,7 +238,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tu(
@@ -247,7 +247,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tu(
@@ -256,7 +256,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tu(
@@ -265,7 +265,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tu(
@@ -274,7 +274,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tu(
@@ -283,7 +283,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tu(
@@ -292,7 +292,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tu(
@@ -301,7 +301,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tu(
@@ -310,7 +310,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tu(
@@ -319,7 +319,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tu(
@@ -328,7 +328,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tu(
@@ -337,7 +337,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tu(
@@ -346,7 +346,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tu(
@@ -355,7 +355,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tu(
@@ -364,7 +364,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tu(
@@ -373,7 +373,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tu(
@@ -382,7 +382,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tu(
@@ -391,7 +391,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tu(
@@ -400,7 +400,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tu(
@@ -409,7 +409,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_tu(
@@ -418,7 +418,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t maskedoff, vint32mf2_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_tu(
@@ -427,7 +427,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t maskedoff, vint32m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_tu(
@@ -436,7 +436,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t maskedoff, vint32m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_tu(
@@ -445,7 +445,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t maskedoff, vint32m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tu(
@@ -454,7 +454,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t maskedoff, vint32m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_tu(
@@ -463,7 +463,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t maskedoff, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_tu(
@@ -472,7 +472,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t maskedoff, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_tu(
@@ -481,7 +481,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t maskedoff, vuint32m2_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_tu(
@@ -490,7 +490,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t maskedoff, vuint32m4_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tu(
@@ -499,7 +499,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t maskedoff, vuint32m8_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tu(
@@ -508,7 +508,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rod_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tu(
@@ -517,7 +517,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tu(
@@ -526,7 +526,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rod_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tu(
@@ -535,7 +535,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tu(
@@ -544,7 +544,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rod_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tu(
@@ -553,7 +553,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tu(
@@ -562,7 +562,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rod_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tu(
@@ -571,7 +571,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tu(
@@ -580,7 +580,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rod_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
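The hunks above rename the float-to-float narrowing overloads (`vfncvt_f`/`vfncvt_rod_f`); the hunks below do the same for the float-to-integer forms, where the `_rtz_` overloads truncate toward zero while the plain overloads round per the dynamic rounding mode. A minimal usage sketch of a renamed overload, mirroring the tests (the wrapper name `narrow_trunc` is illustrative, not part of this patch; compile with the V extension enabled):

#include <riscv_vector.h>

// Tail-undisturbed narrowing convert with round-toward-zero: lanes past vl
// keep maskedoff's values. `narrow_trunc` is a hypothetical wrapper name.
vint32m1_t narrow_trunc(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
  return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}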
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tu(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tu(
@@ -598,7 +598,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tu(
@@ -607,7 +607,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tu(
@@ -616,7 +616,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tu(
@@ -625,7 +625,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tu(
@@ -634,7 +634,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tu(
@@ -643,7 +643,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tu(
@@ -652,7 +652,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tu(
@@ -661,7 +661,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tu(
@@ -670,7 +670,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tu(
@@ -679,7 +679,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tu(
@@ -688,7 +688,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tu(
@@ -697,7 +697,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tu(
@@ -706,7 +706,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tu(
@@ -715,7 +715,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tu(
@@ -724,7 +724,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tu(
@@ -733,7 +733,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_tu(
@@ -742,7 +742,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t maskedoff, vint64m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_tu(
@@ -751,7 +751,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t maskedoff, vint64m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_tu(
@@ -760,7 +760,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t maskedoff, vint64m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_tu(
@@ -769,7 +769,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t maskedoff, vint64m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_tu(
@@ -778,7 +778,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t maskedoff, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_tu(
@@ -787,7 +787,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t maskedoff, vuint64m2_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_tu(
@@ -796,7 +796,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t maskedoff, vuint64m4_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_tu(
@@ -805,7 +805,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t maskedoff, vuint64m8_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tu(
@@ -814,7 +814,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rod_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tu(
@@ -823,7 +823,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tu(
@@ -832,7 +832,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rod_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tu(
@@ -841,7 +841,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tu(
@@ -850,7 +850,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rod_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tu(
@@ -859,7 +859,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tu(
@@ -868,7 +868,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rod_f_tu(maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
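From here the patch moves to the `_tum` overloads (tail undisturbed, masked-off agnostic under the rvv-intrinsic-doc policy naming), which take the mask as the leading argument. A hedged sketch of a call, mirroring the tests (the wrapper name is hypothetical):

#include <riscv_vector.h>

// _tum policy: tail lanes keep maskedoff's values; inactive (masked-off)
// lanes are agnostic. `narrow_masked` is a hypothetical wrapper name.
vint8mf8_t narrow_masked(vbool64_t mask, vint8mf8_t maskedoff,
                         vfloat16mf4_t src, size_t vl) {
  return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}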
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_tum(
@@ -877,7 +877,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tum(
@@ -886,7 +886,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tum(
@@ -895,7 +895,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tum(
@@ -904,7 +904,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tum(
@@ -913,7 +913,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tum(
@@ -922,7 +922,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tum(
@@ -931,7 +931,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tum(
@@ -940,7 +940,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tum(
@@ -949,7 +949,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tum(
@@ -958,7 +958,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tum(
@@ -967,7 +967,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tum(
@@ -976,7 +976,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tum(
@@ -985,7 +985,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tum(
@@ -994,7 +994,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tum(
@@ -1003,7 +1003,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tum(
@@ -1012,7 +1012,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tum(
@@ -1021,7 +1021,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tum(
@@ -1030,7 +1030,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tum(
@@ -1039,7 +1039,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tum(
@@ -1048,7 +1048,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tum(
@@ -1057,7 +1057,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tum(
@@ -1066,7 +1066,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tum(
@@ -1075,7 +1075,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tum(
@@ -1084,7 +1084,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tum(
@@ -1093,7 +1093,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tum(
@@ -1102,7 +1102,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tum(
@@ -1111,7 +1111,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tum(
@@ -1120,7 +1120,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tum(
@@ -1129,7 +1129,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tum(
@@ -1138,7 +1138,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tum(
@@ -1147,7 +1147,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tum(
@@ -1156,7 +1156,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tum(
@@ -1165,7 +1165,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tum(
@@ -1174,7 +1174,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tum(
@@ -1183,7 +1183,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tum(
@@ -1192,7 +1192,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tum(
@@ -1201,7 +1201,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tum(
@@ -1210,7 +1210,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tum(
@@ -1219,7 +1219,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tum(
@@ -1228,7 +1228,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tum(
@@ -1237,7 +1237,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tum(
@@ -1246,7 +1246,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tum(
@@ -1255,7 +1255,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tum(
@@ -1264,7 +1264,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tum(
@@ -1273,7 +1273,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_tum(
@@ -1282,7 +1282,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_tum(
@@ -1291,7 +1291,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_x_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_tum(
@@ -1300,7 +1300,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_tum(
@@ -1309,7 +1309,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tum(
@@ -1318,7 +1318,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_tum(
@@ -1327,7 +1327,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_tum(
@@ -1336,7 +1336,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_tum(
@@ -1345,7 +1345,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_tum(
@@ -1354,7 +1354,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tum(
@@ -1363,7 +1363,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tum(
@@ -1372,7 +1372,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tum(
@@ -1381,7 +1381,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tum(
@@ -1390,7 +1390,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tum(
@@ -1399,7 +1399,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t mas
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tum(
@@ -1408,7 +1408,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tum(
@@ -1417,7 +1417,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tum(
@@ -1426,7 +1426,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tum(
@@ -1435,7 +1435,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tum(
@@ -1444,7 +1444,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tum(
@@ -1453,7 +1453,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tum(
@@ -1462,7 +1462,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tum(
@@ -1471,7 +1471,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tum(
@@ -1480,7 +1480,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tum(
@@ -1489,7 +1489,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tum(
@@ -1498,7 +1498,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tum(
@@ -1507,7 +1507,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tum(
@@ -1516,7 +1516,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tum(
@@ -1525,7 +1525,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tum(
@@ -1534,7 +1534,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tum(
@@ -1543,7 +1543,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tum(
@@ -1552,7 +1552,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tum(
@@ -1561,7 +1561,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tum(
@@ -1570,7 +1570,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tum(
@@ -1579,7 +1579,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tum(
@@ -1588,7 +1588,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tum(
@@ -1597,7 +1597,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_tum(
@@ -1606,7 +1606,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_tum(
@@ -1615,7 +1615,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_tum(
@@ -1624,7 +1624,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_tum(
@@ -1633,7 +1633,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_tum(
@@ -1642,7 +1642,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_tum(
@@ -1651,7 +1651,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_tum(
@@ -1660,7 +1660,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_tum(
@@ -1669,7 +1669,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tum(
@@ -1678,7 +1678,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tum(
@@ -1687,7 +1687,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tum(
@@ -1696,7 +1696,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tum(
@@ -1705,7 +1705,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tum(
@@ -1714,7 +1714,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tum(
@@ -1723,7 +1723,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tum(
@@ -1732,7 +1732,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
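The remaining hunks cover the `_tumu` overloads (tail undisturbed, mask undisturbed), where inactive lanes also retain `maskedoff`'s values. A minimal sketch under the same assumptions as above:

#include <riscv_vector.h>

// _tumu policy: both tail lanes and inactive (masked-off) lanes keep
// maskedoff's values. `narrow_masked_mu` is a hypothetical wrapper name.
vint8mf8_t narrow_masked_mu(vbool64_t mask, vint8mf8_t maskedoff,
                            vfloat16mf4_t src, size_t vl) {
  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}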
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_tumu(
@@ -1741,7 +1741,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tumu(
@@ -1750,7 +1750,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tumu(
@@ -1759,7 +1759,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tumu(
@@ -1768,7 +1768,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tumu(
@@ -1777,7 +1777,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tumu(
@@ -1786,7 +1786,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tumu(
@@ -1795,7 +1795,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tumu(
@@ -1804,7 +1804,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tumu(
@@ -1813,7 +1813,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tumu(
@@ -1822,7 +1822,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tumu(
@@ -1831,7 +1831,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tumu(
@@ -1840,7 +1840,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tumu(
@@ -1849,7 +1849,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tumu(
@@ -1858,7 +1858,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tumu(
@@ -1867,7 +1867,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tumu(
@@ -1876,7 +1876,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tumu(
@@ -1885,7 +1885,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tumu(
@@ -1894,7 +1894,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tumu(
@@ -1903,7 +1903,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tumu(
@@ -1912,7 +1912,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tumu(
@@ -1921,7 +1921,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tumu(
@@ -1930,7 +1930,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tumu(
@@ -1939,7 +1939,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tumu(
@@ -1948,7 +1948,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tumu(
@@ -1957,7 +1957,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tumu(
@@ -1966,7 +1966,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tumu(
@@ -1975,7 +1975,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tumu(
@@ -1984,7 +1984,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tumu(
@@ -1993,7 +1993,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tumu(
@@ -2002,7 +2002,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tumu(
@@ -2011,7 +2011,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tumu(
@@ -2020,7 +2020,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tumu(
@@ -2029,7 +2029,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tumu(
@@ -2038,7 +2038,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tumu(
@@ -2047,7 +2047,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tumu(
@@ -2056,7 +2056,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tumu(
@@ -2065,7 +2065,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tumu(
@@ -2074,7 +2074,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tumu(
@@ -2083,7 +2083,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t mas
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tumu(
@@ -2092,7 +2092,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tumu(
@@ -2101,7 +2101,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tumu(
@@ -2110,7 +2110,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tumu(
@@ -2119,7 +2119,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tumu(
@@ -2128,7 +2128,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tumu(
@@ -2137,7 +2137,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_tumu(
@@ -2146,7 +2146,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_tumu(
@@ -2155,7 +2155,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_tumu(
@@ -2164,7 +2164,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_tumu(
@@ -2173,7 +2173,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tumu(
@@ -2182,7 +2182,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_tumu(
@@ -2191,7 +2191,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_tumu(
@@ -2200,7 +2200,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_tumu(
@@ -2209,7 +2209,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_tumu(
@@ -2218,7 +2218,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tumu(
@@ -2227,7 +2227,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tumu(
@@ -2236,7 +2236,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tumu(
@@ -2245,7 +2245,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tumu(
@@ -2254,7 +2254,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tumu(
@@ -2263,7 +2263,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t ma
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tumu(
@@ -2272,7 +2272,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tumu(
@@ -2281,7 +2281,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tumu(
@@ -2290,7 +2290,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tumu(
@@ -2299,7 +2299,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t masked
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tumu(
@@ -2308,7 +2308,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tumu(
@@ -2317,7 +2317,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t masked
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tumu(
@@ -2326,7 +2326,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tumu(
@@ -2335,7 +2335,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tumu(
@@ -2344,7 +2344,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tumu(
@@ -2353,7 +2353,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tumu(
@@ -2362,7 +2362,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tumu(
@@ -2371,7 +2371,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tumu(
@@ -2380,7 +2380,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tumu(
@@ -2389,7 +2389,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tumu(
@@ -2398,7 +2398,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tumu(
@@ -2407,7 +2407,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tumu(
@@ -2416,7 +2416,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tumu(
@@ -2425,7 +2425,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tumu(
@@ -2434,7 +2434,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tumu(
@@ -2443,7 +2443,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tumu(
@@ -2452,7 +2452,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tumu(
@@ -2461,7 +2461,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_tumu(
@@ -2470,7 +2470,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_tumu(
@@ -2479,7 +2479,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_tumu(
@@ -2488,7 +2488,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_tumu(
@@ -2497,7 +2497,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_tumu(
@@ -2506,7 +2506,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_tumu(
@@ -2515,7 +2515,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_tumu(
@@ -2524,7 +2524,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_tumu(
@@ -2533,7 +2533,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tumu(
@@ -2542,7 +2542,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tumu(
@@ -2551,7 +2551,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tumu(
@@ -2560,7 +2560,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tumu(
@@ -2569,7 +2569,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tumu(
@@ -2578,7 +2578,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tumu(
@@ -2587,7 +2587,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maske
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tumu(
@@ -2596,7 +2596,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_mu(
@@ -2605,7 +2605,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t masked
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_mu(
@@ -2614,7 +2614,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_mu(
@@ -2623,7 +2623,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_mu(
@@ -2632,7 +2632,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_mu(
@@ -2641,7 +2641,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_mu(
@@ -2650,7 +2650,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_mu(
@@ -2659,7 +2659,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_mu(
@@ -2668,7 +2668,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_mu(
@@ -2677,7 +2677,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_mu(
@@ -2686,7 +2686,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_mu(
@@ -2695,7 +2695,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_mu(
@@ -2704,7 +2704,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_mu(
@@ -2713,7 +2713,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_mu(
@@ -2722,7 +2722,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_mu(
@@ -2731,7 +2731,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_mu(
@@ -2740,7 +2740,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_mu(
@@ -2749,7 +2749,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_mu(
@@ -2758,7 +2758,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_mu(
@@ -2767,7 +2767,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_mu(
@@ -2776,7 +2776,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_mu(
@@ -2785,7 +2785,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_mu(
@@ -2794,7 +2794,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_mu(
@@ -2803,7 +2803,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_mu(
@@ -2812,7 +2812,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_mu(
@@ -2821,7 +2821,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_mu(
@@ -2830,7 +2830,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_mu(
@@ -2839,7 +2839,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_mu(
@@ -2848,7 +2848,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_mu(
@@ -2857,7 +2857,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_mu(
@@ -2866,7 +2866,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_mu(
@@ -2875,7 +2875,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_mu(
@@ -2884,7 +2884,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_mu(
@@ -2893,7 +2893,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_mu(
@@ -2902,7 +2902,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_mu(
@@ -2911,7 +2911,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_mu(
@@ -2920,7 +2920,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_mu(
@@ -2929,7 +2929,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_mu(
@@ -2938,7 +2938,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_mu(
@@ -2947,7 +2947,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_mu(
@@ -2956,7 +2956,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_mu(
@@ -2965,7 +2965,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_mu(
@@ -2974,7 +2974,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_mu(
@@ -2983,7 +2983,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_mu(
@@ -2992,7 +2992,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_mu(
@@ -3001,7 +3001,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_mu(
@@ -3010,7 +3010,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_mu(
@@ -3019,7 +3019,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_mu(
@@ -3028,7 +3028,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_mu(
@@ -3037,7 +3037,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_mu(
@@ -3046,7 +3046,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_mu(
@@ -3055,7 +3055,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_mu(
@@ -3064,7 +3064,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_mu(
@@ -3073,7 +3073,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_mu(
@@ -3082,7 +3082,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_mu(
@@ -3091,7 +3091,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_mu(
@@ -3100,7 +3100,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_mu(
@@ -3109,7 +3109,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_mu(
@@ -3118,7 +3118,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_mu(
@@ -3127,7 +3127,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_mu(
@@ -3136,7 +3136,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_mu(
@@ -3145,7 +3145,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_mu(
@@ -3154,7 +3154,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_mu(
@@ -3163,7 +3163,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_mu(
@@ -3172,7 +3172,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_mu(
@@ -3181,7 +3181,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_mu(
@@ -3190,7 +3190,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_mu(
@@ -3199,7 +3199,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_mu(
@@ -3208,7 +3208,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_mu(
@@ -3217,7 +3217,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_mu(
@@ -3226,7 +3226,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_mu(
@@ -3235,7 +3235,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_mu(
@@ -3244,7 +3244,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_mu(
@@ -3253,7 +3253,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_mu(
@@ -3262,7 +3262,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_mu(
@@ -3271,7 +3271,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_mu(
@@ -3280,7 +3280,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_mu(
@@ -3289,7 +3289,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_mu(
@@ -3298,7 +3298,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_mu(
@@ -3307,7 +3307,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_mu(
@@ -3316,7 +3316,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_mu(
@@ -3325,7 +3325,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_mu(
@@ -3334,7 +3334,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_mu(
@@ -3343,7 +3343,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_mu(
@@ -3352,7 +3352,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_mu(
@@ -3361,7 +3361,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_mu(
@@ -3370,7 +3370,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_mu(
@@ -3379,7 +3379,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_mu(
@@ -3388,7 +3388,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_mu(
@@ -3397,7 +3397,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_mu(
@@ -3406,7 +3406,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_mu(
@@ -3415,7 +3415,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_mu(
@@ -3424,7 +3424,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_mu(
@@ -3433,7 +3433,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_mu(
@@ -3442,7 +3442,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_mu(
@@ -3451,7 +3451,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_mu(
@@ -3460,6 +3460,6 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
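The hunks above finish the rename for the `_mu` (mask-undisturbed) overloads of `vfncvt` in the overloaded policy tests: every call site keeps its operands and policy suffix and only gains the `__riscv_` prefix. A minimal caller sketch, assuming `<riscv_vector.h>` and the overloaded API; the wrapper name `narrow_masked` is illustrative, not taken from the test file:

#include <riscv_vector.h>

/* Narrow a masked f32 vector to f16 with the mask-undisturbed policy.
 * Before this patch the overload was spelled vfncvt_f_mu(...); after
 * it, the same overload carries the __riscv_ prefix.                 */
vfloat16mf4_t narrow_masked(vbool64_t mask, vfloat16mf4_t maskedoff,
                            vfloat32mf2_t src, size_t vl) {
    /* was: return vfncvt_f_mu(mask, maskedoff, src, vl); */
    return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
}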
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfneg.c
index 00b3f994c6f1..0d57f0d27a19 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfneg.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfneg_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfneg_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfneg_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfneg_tu(maskedoff, op1, vl);
+ return __riscv_vfneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_tum(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_tum(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_tum(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_tum(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tum(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_tum(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_tum(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_tumu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_tumu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_tumu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_tumu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_tumu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_tumu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_tumu(
@@ -355,7 +355,7 @@ vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_tumu(
@@ -364,7 +364,7 @@ vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_tumu(
@@ -373,7 +373,7 @@ vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_tumu(
@@ -382,7 +382,7 @@ vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_tumu(
@@ -391,7 +391,7 @@ vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_tumu(
@@ -400,7 +400,7 @@ vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_tumu(
@@ -409,7 +409,7 @@ vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_mu(
@@ -418,7 +418,7 @@ vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_mu(
@@ -427,7 +427,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_mu(
@@ -436,7 +436,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_mu(
@@ -445,7 +445,7 @@ vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_mu(
@@ -454,7 +454,7 @@ vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_mu(
@@ -463,7 +463,7 @@ vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfneg_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_mu(
@@ -472,7 +472,7 @@ vfloat16m8_t test_vfneg_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_mu(
@@ -481,7 +481,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_mu(
@@ -490,7 +490,7 @@ vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_mu(
@@ -499,7 +499,7 @@ vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_mu(
@@ -508,7 +508,7 @@ vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_mu(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_mu(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_mu(
@@ -535,7 +535,7 @@ vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_mu(
@@ -544,6 +544,6 @@ vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfneg_mu(mask, maskedoff, op1, vl);
}
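The vfneg.c diff above applies the same mechanical prefix to all four policy variants of the overloaded `vfneg`. As the test signatures show, `_tu` is the unmasked form, while `_tum`, `_tumu`, and `_mu` take the mask as their first argument. A short sketch chaining the renamed overloads; the wrapper name and data flow are illustrative only:

#include <riscv_vector.h>

/* Illustrative: the four policy flavors of the renamed vfneg overload.
 * The t/m letters in the suffix mark which destination elements
 * (tail / inactive) are taken from maskedoff rather than left agnostic. */
vfloat32m1_t negate_demo(vbool32_t mask, vfloat32m1_t maskedoff,
                         vfloat32m1_t op1, size_t vl) {
    vfloat32m1_t r = __riscv_vfneg_tu(maskedoff, op1, vl); /* unmasked, tail undisturbed */
    r = __riscv_vfneg_tum(mask, maskedoff, r, vl);         /* masked, tail undisturbed */
    r = __riscv_vfneg_tumu(mask, maskedoff, r, vl);        /* masked, tail and mask undisturbed */
    return __riscv_vfneg_mu(mask, maskedoff, r, vl);       /* masked, mask undisturbed */
}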
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc.c
index 52d5bef0509c..7911c9da0881 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, floa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
}
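
NOTE (illustration, not part of the patch): the policy-variant file below applies the same mechanical rewrite the tests above verify. As a minimal sketch of what the rename means for user code — the caller name `negated_fma_tu` is hypothetical, and this assumes an RVV-enabled Clang with the updated riscv_vector.h (e.g. -march=rv64gcv):

#include <stddef.h>
#include <riscv_vector.h>

/* Hypothetical caller showing the overloaded, tail-undisturbed vfnmacc,
 * which computes vd[i] = -(vs1[i] * vs2[i]) - vd[i] for the first vl
 * elements and keeps tail elements from vd. */
vfloat32m1_t negated_fma_tu(vfloat32m1_t vd, vfloat32m1_t vs1,
                            vfloat32m1_t vs2, size_t vl) {
  /* Before this patch the overloaded spelling was vfnmacc_tu(vd, vs1, vs2, vl);
   * after it, the __riscv_ prefix is required: */
  return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
}
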
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd.c
index f63963ca8000..56695508d12f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, floa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
}
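The rewrite applied throughout these autogenerated tests is the same in every hunk: the call site keeps its operands and policy suffix, and only the overloaded intrinsic name gains the `__riscv_` prefix. A minimal sketch of the resulting usage, with a hypothetical caller name and assuming the standard RVV intrinsics header:

#include <riscv_vector.h>

// Before this patch the overloaded tail-undisturbed intrinsic was spelled
// without a prefix:
//   return vfnmadd_tu(vd, vs1, vs2, vl);
// After this patch the same overloaded call carries the __riscv_ prefix,
// matching the riscv-c-api-doc naming guideline:
vfloat64m2_t fnmadd_tail_undisturbed(vfloat64m2_t vd, vfloat64m2_t vs1,
                                     vfloat64m2_t vs2, size_t vl) {
  return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
}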
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac.c
index 710072ccb852..3fb7aa59db4e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, floa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub.c
index 16cef5909708..ae8987a6135b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, floa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float r
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double r
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 r
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) {
- return vfnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
}
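// Reviewer note (illustrative, not part of the autogenerated tests): a minimal
// caller-side sketch of what this rename means for the overloaded policy
// intrinsics. The wrapper name `fused_step` is hypothetical; the argument and
// return types mirror the vfnmsub tests above.
#include <riscv_vector.h>

vfloat32m1_t fused_step(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1,
                        vfloat32m1_t vs2, size_t vl) {
  // Tail-undisturbed, mask-undisturbed overload: per active element this
  // computes -(vd * vs1) + vs2. After this patch only the prefixed spelling
  // resolves; the old overload vfnmsub_tum(...) is no longer provided.
  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
}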
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrdiv.c
index 2297e602fdfc..cfbe7906766e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrdiv.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrdiv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_tum(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_tum(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_tum(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_tum(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tum(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_tum(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_tum(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_tumu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_tumu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_tumu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_tumu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_tumu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_tumu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_tumu(
@@ -355,7 +355,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_tumu(
@@ -364,7 +364,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_tumu(
@@ -373,7 +373,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_tumu(
@@ -382,7 +382,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_tumu(
@@ -391,7 +391,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_tumu(
@@ -400,7 +400,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_tumu(
@@ -409,7 +409,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_mu(
@@ -418,7 +418,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_mu(
@@ -427,7 +427,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_mu(
@@ -436,7 +436,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_mu(
@@ -445,7 +445,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_mu(
@@ -454,7 +454,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_mu(
@@ -463,7 +463,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_mu(
@@ -472,7 +472,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_mu(
@@ -481,7 +481,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_mu(
@@ -490,7 +490,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_mu(
@@ -499,7 +499,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_mu(
@@ -508,7 +508,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_mu(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_mu(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_mu(
@@ -535,7 +535,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_mu(
@@ -544,6 +544,6 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
}
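// Reviewer note (illustrative, not from the patch): the same rename pattern
// for an unmasked tail-undisturbed overload. `scale_recip` is a hypothetical
// wrapper; the signature is taken from the vfrdiv tests above.
#include <riscv_vector.h>

vfloat64m1_t scale_recip(vfloat64m1_t maskedoff, vfloat64m1_t op1,
                         double op2, size_t vl) {
  // Reverse division: each active element becomes op2 / op1[i], while _tu
  // keeps tail elements from maskedoff.
  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);  // was: vfrdiv_tu(...)
}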
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c
index 1526d63b8626..c0549904f7b9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfrec7_tu(maskedoff, op1, vl);
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_tum(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_tum(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_tum(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_tum(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tum(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_tum(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_tum(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfrec7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_tumu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_tumu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_tumu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_tumu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_tumu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_tumu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_tumu(
@@ -355,7 +355,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_tumu(
@@ -364,7 +364,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_tumu(
@@ -373,7 +373,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_tumu(
@@ -382,7 +382,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_tumu(
@@ -391,7 +391,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_tumu(
@@ -400,7 +400,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_tumu(
@@ -409,7 +409,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfrec7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_mu(
@@ -418,7 +418,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_mu(
@@ -427,7 +427,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_mu(
@@ -436,7 +436,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_mu(
@@ -445,7 +445,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_mu(
@@ -454,7 +454,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_mu(
@@ -463,7 +463,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_mu(
@@ -472,7 +472,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_mu(
@@ -481,7 +481,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_mu(
@@ -490,7 +490,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_mu(
@@ -499,7 +499,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_mu(
@@ -508,7 +508,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_mu(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_mu(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_mu(
@@ -535,7 +535,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_mu(
@@ -544,6 +544,6 @@ vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfrec7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmax.c
index 1a38f7e676c9..2640fd0897ce 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmax.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tu(
@@ -22,7 +22,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tu(
@@ -94,7 +94,7 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tu(
@@ -103,7 +103,7 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tu(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tu(
@@ -139,7 +139,7 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_tum(
@@ -148,7 +148,7 @@ vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tum(
@@ -157,7 +157,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tum(
@@ -184,7 +184,7 @@ vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tum(
@@ -193,7 +193,7 @@ vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tum(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tum(
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tum(
@@ -238,7 +238,7 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tum(
@@ -265,7 +265,7 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tum(
@@ -274,6 +274,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmin.c
index f082593c9972..625a58f3db5c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredmin.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tu(
@@ -22,7 +22,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tu(
@@ -94,7 +94,7 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tu(
@@ -103,7 +103,7 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_tu(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tu(
@@ -139,7 +139,7 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_tum(
@@ -148,7 +148,7 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tum(
@@ -157,7 +157,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tum(
@@ -184,7 +184,7 @@ vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tum(
@@ -193,7 +193,7 @@ vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tum(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tum(
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tum(
@@ -238,7 +238,7 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_tum(
@@ -265,7 +265,7 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tum(
@@ -274,6 +274,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredosum.c
index eaefbd8a3ecf..6d6bddf59f27 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredosum.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tu(
@@ -22,7 +22,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tu(
@@ -94,7 +94,7 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tu(
@@ -103,7 +103,7 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tu(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tu(
@@ -139,7 +139,7 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tum(
@@ -148,7 +148,7 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tum(
@@ -157,7 +157,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tum(
@@ -184,7 +184,7 @@ vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tum(
@@ -193,7 +193,7 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tum(
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tum(
@@ -238,7 +238,7 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tum(
@@ -265,7 +265,7 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tum(
@@ -274,6 +274,6 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
}
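(Editor's note: the vfredosum.c hunks above rename every overloaded call site mechanically; the argument lists are unchanged. A minimal usage sketch under that assumption follows — the helper name sum_f64m2 is hypothetical, while the intrinsic spelling and signature are taken verbatim from the tests above; assumes a toolchain with the V extension enabled, e.g. -march=rv64gcv.)

#include <riscv_vector.h>

// Hypothetical helper, not part of this patch: ordered float reduction
// with the tail-undisturbed (_tu) policy.  Result elements past vl keep
// their values from the first argument; element 0 of the third argument
// seeds the accumulator.  Only the spelling of the intrinsic changed:
// vfredosum_tu -> __riscv_vfredosum_tu.
vfloat64m1_t sum_f64m2(vfloat64m1_t acc, vfloat64m2_t v, size_t vl) {
  return __riscv_vfredosum_tu(acc, v, acc, vl);
}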
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredusum.c
index 188366cb0a22..994baeba314a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredusum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfredusum.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tu(
@@ -22,7 +22,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tu(
@@ -94,7 +94,7 @@ vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tu(
@@ -103,7 +103,7 @@ vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tu(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tu(
@@ -139,7 +139,7 @@ vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tum(
@@ -148,7 +148,7 @@ vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tum(
@@ -157,7 +157,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tum(
@@ -184,7 +184,7 @@ vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tum(
@@ -193,7 +193,7 @@ vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tum(
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tum(
@@ -238,7 +238,7 @@ vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tum(
@@ -265,7 +265,7 @@ vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tum(
@@ -274,6 +274,6 @@ vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
}
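(Editor's note: the masked policy variants follow the same renaming, with the mask ratio paired to SEW/LMUL exactly as in the tests above — f32m2 operands take a vbool16_t mask. A sketch under that assumption; the helper name masked_sum and the parameter name init are hypothetical.)

#include <riscv_vector.h>

// Hypothetical helper: unordered masked reduction with the
// tail-undisturbed (_tum) policy.  Mask bit i selects whether element i
// of v joins the sum; element 0 of init seeds the accumulator.
vfloat32m1_t masked_sum(vbool16_t mask, vfloat32m1_t maskedoff,
                        vfloat32m2_t v, vfloat32m1_t init, size_t vl) {
  return __riscv_vfredusum_tum(mask, maskedoff, v, init, vl);
}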
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7.c
index dc5f0adab1ac..07de56b9e282 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfrsqrt7_tu(maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_tum(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_tum(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_tum(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_tum(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tum(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsqrt7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_tum(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_tum(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfrsqrt7_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_tumu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_tumu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_tumu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_tumu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_tumu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_tumu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_tumu(
@@ -355,7 +355,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsqrt7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_tumu(
@@ -364,7 +364,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsqrt7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_tumu(
@@ -373,7 +373,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_tumu(
@@ -382,7 +382,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_tumu(
@@ -391,7 +391,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_tumu(
@@ -400,7 +400,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_tumu(
@@ -409,7 +409,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_mu(
@@ -418,7 +418,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_mu(
@@ -427,7 +427,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_mu(
@@ -436,7 +436,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_mu(
@@ -445,7 +445,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_mu(
@@ -454,7 +454,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsqrt7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_mu(
@@ -463,7 +463,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_mu(
@@ -472,7 +472,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_mu(
@@ -481,7 +481,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_mu(
@@ -490,7 +490,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsqrt7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_mu(
@@ -499,7 +499,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_mu(
@@ -508,7 +508,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_mu(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_mu(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_mu(
@@ -535,7 +535,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_mu(
@@ -544,6 +544,6 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsqrt7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfrsqrt7_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
}
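(Editor's note: the unary estimate instructions are renamed the same way. A sketch with the hypothetical helper name rsqrt_est; the claim that vfrsqrt7 delivers an estimate accurate to about 7 bits, and the _mu policy semantics, come from the RVV specification rather than this patch.)

#include <riscv_vector.h>

// Hypothetical helper: per-element 1/sqrt(x) estimate (~7 bits of
// precision) under the mask-undisturbed (_mu) policy -- inactive
// elements keep their values from maskedoff; tail handling is agnostic.
vfloat32m4_t rsqrt_est(vbool8_t mask, vfloat32m4_t maskedoff,
                       vfloat32m4_t x, size_t vl) {
  return __riscv_vfrsqrt7_mu(mask, maskedoff, x, vl);
}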
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub.c
index 2a49f3df3c20..1515d5f22f93 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_tum(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_tum(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_tum(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_tum(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tum(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_tum(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_tum(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_tumu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_tumu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_tumu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_tumu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_tumu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_tumu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_tumu(
@@ -355,7 +355,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_tumu(
@@ -364,7 +364,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_tumu(
@@ -373,7 +373,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_tumu(
@@ -382,7 +382,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_tumu(
@@ -391,7 +391,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_tumu(
@@ -400,7 +400,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_tumu(
@@ -409,7 +409,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_mu(
@@ -418,7 +418,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_mu(
@@ -427,7 +427,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_mu(
@@ -436,7 +436,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_mu(
@@ -445,7 +445,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_mu(
@@ -454,7 +454,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_mu(
@@ -463,7 +463,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_mu(
@@ -472,7 +472,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_mu(
@@ -481,7 +481,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_mu(
@@ -490,7 +490,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_mu(
@@ -499,7 +499,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_mu(
@@ -508,7 +508,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_mu(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_mu(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_mu(
@@ -535,7 +535,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_mu(
@@ -544,6 +544,6 @@ vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
}
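The hunks above mechanically swap each overloaded policy intrinsic for its `__riscv_`-prefixed spelling; argument lists and semantics are untouched. A minimal user-side sketch of the same migration, assuming <riscv_vector.h> and a V-extension target (the kernel name and the constant are hypothetical, not from the patch):

#include <riscv_vector.h>

// Hypothetical kernel: computes c - v[i] for active elements under `mask`,
// using the tail-undisturbed, masked (_tum) policy exercised by the tests
// above; inactive and tail elements are taken from `maskedoff`.
vfloat32m1_t reverse_sub_tum(vbool32_t mask, vfloat32m1_t maskedoff,
                             vfloat32m1_t v, float c, size_t vl) {
  // Before this patch the overloaded call was spelled:
  //   return vfrsub_tum(mask, maskedoff, v, c, vl);
  return __riscv_vfrsub_tum(mask, maskedoff, v, c, vl);
}

The overload resolution is unchanged: the same call name serves every SEW/LMUL combination shown in the tests, so migrating existing code is a pure textual rename.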
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj.c
index 7e4795f17d1d..a05119fb2760 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnj_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, d
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnj_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
}
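The hunks above mechanically rename every overloaded vfsgnj call site in the autogenerated tests; only the spelling of the intrinsic changes, never its argument list or semantics. A minimal sketch of what the same migration looks like in user code, assuming riscv_vector.h and a toolchain with the V and Zvfh extensions enabled (the wrapper function name here is hypothetical, not part of this patch):

#include <riscv_vector.h>

// Hypothetical helper: copy the sign of op2 onto op1 under a mask with
// the tail-undisturbed/mask-undisturbed ("tumu") policy, exactly the
// pattern the tests above exercise. Before this patch-set the call was
// spelled vfsgnj_tumu(...); afterwards the overloaded name carries the
// __riscv_ prefix while the arguments stay identical.
vfloat32m1_t copy_sign_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
                            vfloat32m1_t op1, vfloat32m1_t op2,
                            size_t vl) {
  return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
}

The vfsgnjn.c file that follows gets the identical treatment for the negated-sign-injection intrinsic.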
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn.c
index 81b1d08f1fdc..96781b0d2b1d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjn_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjn_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
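(A minimal usage sketch, not part of the patch itself: it shows how a caller invokes the renamed overloaded intrinsic exercised by the tests above. The helper name `sign_negate_masked` is hypothetical; it assumes <riscv_vector.h> and a toolchain with RVV intrinsic support, e.g. -march=rv64gcv.)

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical helper: for the first vl elements where mask is set, produce
// |op1| with the sign of op2 negated; inactive elements are taken from
// maskedoff (the mask-undisturbed `_mu` policy), matching the calls tested above.
vfloat32m1_t sign_negate_masked(vbool32_t mask, vfloat32m1_t maskedoff,
                                vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
}
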
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx.c
index c2c33f11036b..48e9d1ccea74 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsgnjx_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
}
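Every hunk in this file is the same mechanical rename of the overloaded call. For orientation, a minimal usage sketch of the renamed mask-undisturbed (`_mu`) overload — the helper name and the `-march=rv64gcv` flag are illustrative, not part of this patch; the signature matches the tests above:

#include <riscv_vector.h>

// Sign-injection XOR with both operands equal cancels the sign bit,
// so the active lanes get |x|; where `mask` is clear the result keeps
// the corresponding element of `maskedoff` (mu policy). Compile with,
// e.g., clang -march=rv64gcv (illustrative flag).
vfloat64m1_t abs_where_masked(vbool64_t mask, vfloat64m1_t maskedoff,
                              vfloat64m1_t x, size_t vl) {
  return __riscv_vfsgnjx_mu(mask, maskedoff, x, x, vl);
}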
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down.c
index a2e944adbf1e..393038538d85 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_tum(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_tum(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_tum(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_tum(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tum(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_tum(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_tum(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_tumu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_tumu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_tumu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t mas
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_tumu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_tumu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_tumu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_tumu(
@@ -355,7 +355,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_tumu(
@@ -364,7 +364,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_tumu(
@@ -373,7 +373,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_tumu(
@@ -382,7 +382,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_tumu(
@@ -391,7 +391,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_tumu(
@@ -400,7 +400,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_tumu(
@@ -409,7 +409,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_mu(
@@ -418,7 +418,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_mu(
@@ -427,7 +427,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_mu(
@@ -436,7 +436,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_mu(
@@ -445,7 +445,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_mu(
@@ -454,7 +454,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_mu(
@@ -463,7 +463,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_mu(
@@ -472,7 +472,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_mu(
@@ -481,7 +481,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_mu(
@@ -490,7 +490,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_mu(
@@ -499,7 +499,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_mu(
@@ -508,7 +508,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_mu(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_mu(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_mu(
@@ -535,7 +535,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_mu(
@@ -544,6 +544,6 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}
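As above, the vfslide1down changes are prefix-only. A minimal sketch of the tail-undisturbed (`_tu`) overload in use — the helper is hypothetical, with the signature taken from the tests above:

#include <riscv_vector.h>

// Shift the window down one lane (element i takes element i+1) and
// insert the new scalar sample at element vl-1; tail elements at and
// beyond vl are kept from `window` under the tu policy. Passing
// `window` as both the maskedoff and src operands is deliberate, so
// the same register serves as source and tail fallback.
vfloat32m1_t push_sample(vfloat32m1_t window, float next, size_t vl) {
  return __riscv_vfslide1down_tu(window, window, next, vl);
}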
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up.c
index 45d31f40ea28..da69d4cdca2b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t sr
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1up_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t sr
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t sr
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t sr
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t sr
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t sr
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t sr
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_tum(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t sr
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_tum(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_tum(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_tum(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tum(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_tum(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_tum(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_tumu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_tumu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_tumu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_tumu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1up_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_tumu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_tumu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_tumu(
@@ -355,7 +355,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_tumu(
@@ -364,7 +364,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_tumu(
@@ -373,7 +373,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_tumu(
@@ -382,7 +382,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_tumu(
@@ -391,7 +391,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_tumu(
@@ -400,7 +400,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_tumu(
@@ -409,7 +409,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_mu(
@@ -418,7 +418,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_mu(
@@ -427,7 +427,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_mu(
@@ -436,7 +436,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_mu(
@@ -445,7 +445,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_mu(
@@ -454,7 +454,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_mu(
@@ -463,7 +463,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_mu(
@@ -472,7 +472,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_mu(
@@ -481,7 +481,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_mu(
@@ -490,7 +490,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_mu(
@@ -499,7 +499,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_mu(
@@ -508,7 +508,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_mu(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_mu(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_mu(
@@ -535,7 +535,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_mu(
@@ -544,6 +544,6 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
- return vfslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c
index 56d28a40afb4..926718e15d15 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfsqrt_tu(maskedoff, op1, vl);
+ return __riscv_vfsqrt_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_tum(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_tum(
@@ -157,7 +157,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_tum(
@@ -166,7 +166,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_tum(
@@ -175,7 +175,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_tum(
@@ -184,7 +184,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_tum(
@@ -193,7 +193,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tum(
@@ -202,7 +202,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_tum(
@@ -211,7 +211,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_tum(
@@ -247,7 +247,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_tum(
@@ -256,7 +256,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_tum(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_tum(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfsqrt_tum(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_tumu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_tumu(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_tumu(
@@ -301,7 +301,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_tumu(
@@ -310,7 +310,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_tumu(
@@ -319,7 +319,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_tumu(
@@ -328,7 +328,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_tumu(
@@ -355,7 +355,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_tumu(
@@ -364,7 +364,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_tumu(
@@ -373,7 +373,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_tumu(
@@ -382,7 +382,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_tumu(
@@ -391,7 +391,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_tumu(
@@ -400,7 +400,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_tumu(
@@ -409,7 +409,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfsqrt_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_mu(
@@ -418,7 +418,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_mu(
@@ -427,7 +427,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_mu(
@@ -436,7 +436,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_mu(
@@ -445,7 +445,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_mu(
@@ -454,7 +454,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_mu(
@@ -463,7 +463,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_mu(
@@ -472,7 +472,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_mu(
@@ -481,7 +481,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_mu(
@@ -490,7 +490,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_mu(
@@ -499,7 +499,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_mu(
@@ -508,7 +508,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_mu(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_mu(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_mu(
@@ -535,7 +535,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_mu(
@@ -544,6 +544,6 @@ vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfsqrt_mu(mask, maskedoff, op1, vl);
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub.c
index e2105684d3ca..249e3e3e9239 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vfsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_tum(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_tum(
@@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_tum(
@@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_tum(
@@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_tum(
@@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_tum(
@@ -328,7 +328,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_tum(
@@ -337,7 +337,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_tum(
@@ -346,7 +346,7 @@ vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_tum(
@@ -355,7 +355,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_tum(
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_tum(
@@ -373,7 +373,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_tum(
@@ -382,7 +382,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tum(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tum(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_tum(
@@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_tum(
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_tum(
@@ -472,7 +472,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_tum(
@@ -499,7 +499,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_tum(
@@ -508,7 +508,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_tum(
@@ -517,7 +517,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_tum(
@@ -526,7 +526,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_tum(
@@ -535,7 +535,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_tum(
@@ -544,7 +544,7 @@ vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_tumu(
@@ -553,7 +553,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_tumu(
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_tumu(
@@ -571,7 +571,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_tumu(
@@ -580,7 +580,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_tumu(
@@ -589,7 +589,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_tumu(
@@ -598,7 +598,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_tumu(
@@ -607,7 +607,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_tumu(
@@ -616,7 +616,7 @@ vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_tumu(
@@ -625,7 +625,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_tumu(
@@ -634,7 +634,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_tumu(
@@ -643,7 +643,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_tumu(
@@ -652,7 +652,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_tumu(
@@ -688,7 +688,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_tumu(
@@ -697,7 +697,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_tumu(
@@ -706,7 +706,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_tumu(
@@ -715,7 +715,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_tumu(
@@ -724,7 +724,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_tumu(
@@ -733,7 +733,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_tumu(
@@ -742,7 +742,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_tumu(
@@ -751,7 +751,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_tumu(
@@ -760,7 +760,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_tumu(
@@ -769,7 +769,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_tumu(
@@ -778,7 +778,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_tumu(
@@ -787,7 +787,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_tumu(
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_tumu(
@@ -805,7 +805,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_tumu(
@@ -814,7 +814,7 @@ vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_mu(
@@ -823,7 +823,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_mu(
@@ -832,7 +832,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_mu(
@@ -841,7 +841,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_mu(
@@ -850,7 +850,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_mu(
@@ -859,7 +859,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_mu(
@@ -868,7 +868,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_mu(
@@ -877,7 +877,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_mu(
@@ -886,7 +886,7 @@ vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_mu(
@@ -895,7 +895,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_mu(
@@ -904,7 +904,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_mu(
@@ -913,7 +913,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_mu(
@@ -922,7 +922,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_mu(
@@ -931,7 +931,7 @@ vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_mu(
@@ -940,7 +940,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_mu(
@@ -949,7 +949,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_mu(
@@ -958,7 +958,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_mu(
@@ -967,7 +967,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_mu(
@@ -976,7 +976,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_mu(
@@ -985,7 +985,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_mu(
@@ -994,7 +994,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_mu(
@@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_mu(
@@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_mu(
@@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_mu(
@@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_mu(
@@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_mu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_mu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_mu(
@@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_mu(
@@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd.c
index 73de814846ab..b394f77efe78 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_tu(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_tu(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_tu(
@@ -40,7 +40,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_tu(
@@ -49,7 +49,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_tu(
@@ -58,7 +58,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_tu(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_tu(
@@ -103,7 +103,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_tu(
@@ -112,7 +112,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_tu(
@@ -121,7 +121,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_tu(
@@ -130,7 +130,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_tu(
@@ -139,7 +139,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_tu(
@@ -148,7 +148,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_tu(
@@ -157,7 +157,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_tu(
@@ -166,7 +166,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_tu(
@@ -175,7 +175,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_tu(
@@ -184,7 +184,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tu(
@@ -193,7 +193,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tu(
@@ -202,7 +202,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwadd_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwadd_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwadd_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_tu(
@@ -256,7 +256,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwadd_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_tu(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_tu(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwadd_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_tu(
@@ -283,7 +283,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_tu(
@@ -292,7 +292,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwadd_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_tu(
@@ -301,7 +301,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_tu(
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwadd_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_tu(
@@ -319,7 +319,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_tu(
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwadd_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_tum(
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_tum(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_tum(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_tum(
@@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_tum(
@@ -373,7 +373,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_tum(
@@ -382,7 +382,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_tum(
@@ -391,7 +391,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_tum(
@@ -400,7 +400,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_tum(
@@ -409,7 +409,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_tum(
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_tum(
@@ -472,7 +472,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_tum(
@@ -481,7 +481,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_tum(
@@ -490,7 +490,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_tum(
@@ -499,7 +499,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_tum(
@@ -508,7 +508,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tum(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tum(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tum(
@@ -535,7 +535,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tum(
@@ -544,7 +544,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_tum(
@@ -553,7 +553,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_tum(
@@ -562,7 +562,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_tum(
@@ -571,7 +571,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_tum(
@@ -580,7 +580,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_tum(
@@ -589,7 +589,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_tum(
@@ -598,7 +598,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_tum(
@@ -607,7 +607,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_tum(
@@ -616,7 +616,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_tum(
@@ -625,7 +625,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_tum(
@@ -634,7 +634,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_tum(
@@ -643,7 +643,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_tum(
@@ -652,7 +652,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_tumu(
@@ -688,7 +688,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_tumu(
@@ -697,7 +697,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_tumu(
@@ -706,7 +706,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_tumu(
@@ -715,7 +715,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_tumu(
@@ -724,7 +724,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_tumu(
@@ -733,7 +733,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_tumu(
@@ -742,7 +742,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_tumu(
@@ -751,7 +751,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_tumu(
@@ -760,7 +760,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_tumu(
@@ -769,7 +769,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_tumu(
@@ -778,7 +778,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_tumu(
@@ -787,7 +787,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_tumu(
@@ -796,7 +796,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_tumu(
@@ -805,7 +805,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_tumu(
@@ -814,7 +814,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_tumu(
@@ -823,7 +823,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_tumu(
@@ -832,7 +832,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tumu(
@@ -841,7 +841,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tumu(
@@ -850,7 +850,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tumu(
@@ -859,7 +859,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tumu(
@@ -868,7 +868,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_tumu(
@@ -877,7 +877,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_tumu(
@@ -886,7 +886,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_tumu(
@@ -895,7 +895,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_tumu(
@@ -904,7 +904,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_tumu(
@@ -913,7 +913,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_tumu(
@@ -922,7 +922,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_tumu(
@@ -931,7 +931,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_tumu(
@@ -940,7 +940,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_tumu(
@@ -949,7 +949,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_tumu(
@@ -958,7 +958,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_tumu(
@@ -967,7 +967,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_tumu(
@@ -976,7 +976,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_mu(
@@ -985,7 +985,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_mu(
@@ -994,7 +994,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_mu(
@@ -1003,7 +1003,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_mu(
@@ -1012,7 +1012,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_mu(
@@ -1030,7 +1030,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_mu(
@@ -1039,7 +1039,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_mu(
@@ -1048,7 +1048,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_mu(
@@ -1057,7 +1057,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_mu(
@@ -1066,7 +1066,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_mu(
@@ -1075,7 +1075,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_mu(
@@ -1084,7 +1084,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_mu(
@@ -1093,7 +1093,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_mu(
@@ -1102,7 +1102,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_mu(
@@ -1111,7 +1111,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_mu(
@@ -1120,7 +1120,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_mu(
@@ -1129,7 +1129,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_mu(
@@ -1138,7 +1138,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_mu(
@@ -1147,7 +1147,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_mu(
@@ -1156,7 +1156,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_mu(
@@ -1165,7 +1165,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_mu(
@@ -1174,7 +1174,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_mu(
@@ -1183,7 +1183,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_mu(
@@ -1192,7 +1192,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_mu(
@@ -1201,7 +1201,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_mu(
@@ -1210,7 +1210,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_mu(
@@ -1219,7 +1219,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_mu(
@@ -1228,7 +1228,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_mu(
@@ -1237,7 +1237,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_mu(
@@ -1246,7 +1246,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_mu(
@@ -1255,7 +1255,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_mu(
@@ -1264,7 +1264,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_mu(
@@ -1273,7 +1273,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_mu(
@@ -1282,7 +1282,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_mu(
@@ -1291,7 +1291,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_mu(
@@ -1300,6 +1300,6 @@ vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
}
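(Note: the functions above are autogenerated tests; the minimal sketch below is not part of the patch and only illustrates how caller code adopts the renamed overloads. The function name widen_add and the compile flags are illustrative assumptions; the signature mirrors test_vfwadd_vv_f32m1_tumu from this file.)

#include <riscv_vector.h>

// Minimal sketch: migrating a caller of the overloaded tail-undisturbed,
// mask-undisturbed (tumu) widening add after this patch. Build against a
// vector-enabled target, e.g. clang -target riscv64 -march=rv64gcv_zvfh
// (the exact extension string may vary by toolchain version).
vfloat32m1_t widen_add(vbool32_t mask, vfloat32m1_t acc,
                       vfloat16mf2_t a, vfloat16mf2_t b, size_t vl) {
  // Before this patch the overloaded call was spelled without the prefix:
  //   return vfwadd_vv_tumu(mask, acc, a, b, vl);
  return __riscv_vfwadd_vv_tumu(mask, acc, a, b, vl);
}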
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt.c
index bf593304ebde..4d85fb90ce12 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint8mf8_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint8mf4_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint8mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint8m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint8m2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint8m4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_tu(
@@ -76,7 +76,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_tu(
@@ -85,7 +85,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_tu(
@@ -94,7 +94,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint8mf2_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_tu(
@@ -103,7 +103,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint8m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint8m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint8m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tu(
@@ -130,7 +130,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tu(
@@ -139,7 +139,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tu(
@@ -148,7 +148,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tu(
@@ -157,7 +157,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tu(
@@ -166,7 +166,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tu(
@@ -175,7 +175,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tu(
@@ -184,7 +184,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tu(
@@ -193,7 +193,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tu(
@@ -202,7 +202,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tu(
@@ -211,7 +211,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tu(
@@ -220,7 +220,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tu(
@@ -229,7 +229,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tu(
@@ -238,7 +238,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tu(
@@ -247,7 +247,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tu(
@@ -256,7 +256,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tu(
@@ -265,7 +265,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tu(
@@ -274,7 +274,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tu(
@@ -283,7 +283,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tu(
@@ -292,7 +292,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tu(
@@ -301,7 +301,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_tu(
@@ -310,7 +310,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint16mf4_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_tu(
@@ -319,7 +319,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint16mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_tu(
@@ -328,7 +328,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint16m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_tu(
@@ -337,7 +337,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint16m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_tu(
@@ -346,7 +346,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint16m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_tu(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_tu(
@@ -364,7 +364,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint16mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_tu(
@@ -373,7 +373,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint16m1_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_tu(
@@ -382,7 +382,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint16m2_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_tu(
@@ -391,7 +391,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint16m4_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_tu(
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_tu(
@@ -409,7 +409,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_tu(
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_tu(
@@ -427,7 +427,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tu(
@@ -436,7 +436,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tu(
@@ -445,7 +445,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tu(
@@ -454,7 +454,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tu(
@@ -463,7 +463,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tu(
@@ -472,7 +472,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tu(
@@ -481,7 +481,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tu(
@@ -490,7 +490,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tu(
@@ -499,7 +499,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_x_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tu(
@@ -508,7 +508,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tu(
@@ -517,7 +517,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tu(
@@ -526,7 +526,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tu(
@@ -535,7 +535,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tu(
@@ -544,7 +544,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tu(
@@ -553,7 +553,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tu(
@@ -562,7 +562,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tu(
@@ -571,7 +571,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tu(
@@ -580,7 +580,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_tu(
@@ -589,7 +589,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_tu(
@@ -598,7 +598,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint32m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_tu(
@@ -607,7 +607,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint32m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tu(
@@ -616,7 +616,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint32m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_tu(
@@ -625,7 +625,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint32mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_tu(
@@ -634,7 +634,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint32m1_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_tu(
@@ -643,7 +643,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint32m2_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tu(
@@ -652,7 +652,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint32m4_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_tu(
@@ -661,7 +661,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_tu(
@@ -670,7 +670,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_tu(
@@ -679,7 +679,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t src
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_f_tu(maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_tum(
@@ -688,7 +688,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_tum(
@@ -697,7 +697,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_tum(
@@ -706,7 +706,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_tum(
@@ -715,7 +715,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_tum(
@@ -724,7 +724,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_tum(
@@ -733,7 +733,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_tum(
@@ -742,7 +742,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_tum(
@@ -751,7 +751,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_tum(
@@ -760,7 +760,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_tum(
@@ -769,7 +769,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_tum(
@@ -778,7 +778,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_tum(
@@ -787,7 +787,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_tum(
@@ -796,7 +796,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tum(
@@ -805,7 +805,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tum(
@@ -814,7 +814,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tum(
@@ -823,7 +823,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tum(
@@ -832,7 +832,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tum(
@@ -841,7 +841,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tum(
@@ -850,7 +850,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tum(
@@ -859,7 +859,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tum(
@@ -868,7 +868,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tum(
@@ -877,7 +877,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tum(
@@ -886,7 +886,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tum(
@@ -895,7 +895,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tum(
@@ -904,7 +904,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tum(
@@ -913,7 +913,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tum(
@@ -922,7 +922,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tum(
@@ -931,7 +931,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tum(
@@ -940,7 +940,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tum(
@@ -949,7 +949,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tum(
@@ -958,7 +958,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tum(
@@ -967,7 +967,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tum(
@@ -976,7 +976,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_tum(
@@ -985,7 +985,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_tum(
@@ -994,7 +994,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_tum(
@@ -1003,7 +1003,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_tum(
@@ -1012,7 +1012,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_tum(
@@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_tum(
@@ -1030,7 +1030,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_tum(
@@ -1039,7 +1039,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_tum(
@@ -1048,7 +1048,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_tum(
@@ -1057,7 +1057,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_tum(
@@ -1066,7 +1066,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_tum(
@@ -1075,7 +1075,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_tum(
@@ -1084,7 +1084,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_tum(
@@ -1093,7 +1093,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_tum(
@@ -1102,7 +1102,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tum(
@@ -1111,7 +1111,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tum(
@@ -1120,7 +1120,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tum(
@@ -1129,7 +1129,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tum(
@@ -1138,7 +1138,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tum(
@@ -1147,7 +1147,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tum(
@@ -1156,7 +1156,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tum(
@@ -1165,7 +1165,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tum(
@@ -1174,7 +1174,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tum(
@@ -1183,7 +1183,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tum(
@@ -1192,7 +1192,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tum(
@@ -1201,7 +1201,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tum(
@@ -1210,7 +1210,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tum(
@@ -1219,7 +1219,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tum(
@@ -1228,7 +1228,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tum(
@@ -1237,7 +1237,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tum(
@@ -1246,7 +1246,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tum(
@@ -1255,7 +1255,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_tum(
@@ -1264,7 +1264,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_tum(
@@ -1273,7 +1273,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_tum(
@@ -1282,7 +1282,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tum(
@@ -1291,7 +1291,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_tum(
@@ -1300,7 +1300,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_tum(
@@ -1309,7 +1309,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_tum(
@@ -1318,7 +1318,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tum(
@@ -1327,7 +1327,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_tum(
@@ -1336,7 +1336,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_tum(
@@ -1345,7 +1345,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_tum(
@@ -1354,7 +1354,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_f_tum(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_tumu(
@@ -1363,7 +1363,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_tumu(
@@ -1372,7 +1372,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_tumu(
@@ -1381,7 +1381,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_tumu(
@@ -1390,7 +1390,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_tumu(
@@ -1399,7 +1399,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_tumu(
@@ -1408,7 +1408,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_tumu(
@@ -1417,7 +1417,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_tumu(
@@ -1426,7 +1426,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_tumu(
@@ -1435,7 +1435,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_tumu(
@@ -1444,7 +1444,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_tumu(
@@ -1453,7 +1453,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_tumu(
@@ -1462,7 +1462,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_tumu(
@@ -1471,7 +1471,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tumu(
@@ -1480,7 +1480,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tumu(
@@ -1489,7 +1489,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tumu(
@@ -1498,7 +1498,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tumu(
@@ -1507,7 +1507,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tumu(
@@ -1516,7 +1516,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tumu(
@@ -1525,7 +1525,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tumu(
@@ -1534,7 +1534,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tumu(
@@ -1543,7 +1543,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tumu(
@@ -1552,7 +1552,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tumu(
@@ -1561,7 +1561,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(
@@ -1570,7 +1570,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tumu(
@@ -1579,7 +1579,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tumu(
@@ -1588,7 +1588,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tumu(
@@ -1597,7 +1597,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tumu(
@@ -1606,7 +1606,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tumu(
@@ -1615,7 +1615,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tumu(
@@ -1624,7 +1624,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tumu(
@@ -1633,7 +1633,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tumu(
@@ -1642,7 +1642,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tumu(
@@ -1651,7 +1651,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_tumu(
@@ -1660,7 +1660,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_tumu(
@@ -1669,7 +1669,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_tumu(
@@ -1678,7 +1678,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_tumu(
@@ -1687,7 +1687,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_tumu(
@@ -1696,7 +1696,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_tumu(
@@ -1705,7 +1705,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_tumu(
@@ -1714,7 +1714,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_tumu(
@@ -1723,7 +1723,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_tumu(
@@ -1732,7 +1732,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_tumu(
@@ -1741,7 +1741,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_tumu(
@@ -1750,7 +1750,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_tumu(
@@ -1759,7 +1759,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_tumu(
@@ -1768,7 +1768,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_tumu(
@@ -1777,7 +1777,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tumu(
@@ -1786,7 +1786,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tumu(
@@ -1795,7 +1795,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tumu(
@@ -1804,7 +1804,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tumu(
@@ -1813,7 +1813,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tumu(
@@ -1822,7 +1822,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tumu(
@@ -1831,7 +1831,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tumu(
@@ -1840,7 +1840,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tumu(
@@ -1849,7 +1849,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tumu(
@@ -1858,7 +1858,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tumu(
@@ -1867,7 +1867,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tumu(
@@ -1876,7 +1876,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tumu(
@@ -1885,7 +1885,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tumu(
@@ -1894,7 +1894,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tumu(
@@ -1903,7 +1903,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tumu(
@@ -1912,7 +1912,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tumu(
@@ -1921,7 +1921,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tumu(
@@ -1930,7 +1930,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_tumu(
@@ -1939,7 +1939,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_tumu(
@@ -1948,7 +1948,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_tumu(
@@ -1957,7 +1957,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tumu(
@@ -1966,7 +1966,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_tumu(
@@ -1975,7 +1975,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_tumu(
@@ -1984,7 +1984,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_tumu(
@@ -1993,7 +1993,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tumu(
@@ -2002,7 +2002,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_tumu(
@@ -2011,7 +2011,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_tumu(
@@ -2020,7 +2020,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_tumu(
@@ -2029,7 +2029,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_f_tumu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_mu(
@@ -2038,7 +2038,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_mu(
@@ -2047,7 +2047,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_mu(
@@ -2056,7 +2056,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_mu(
@@ -2065,7 +2065,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_mu(
@@ -2074,7 +2074,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_mu(
@@ -2083,7 +2083,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_mu(
@@ -2092,7 +2092,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_mu(
@@ -2101,7 +2101,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_mu(
@@ -2110,7 +2110,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_mu(
@@ -2119,7 +2119,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_mu(
@@ -2128,7 +2128,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_mu(
@@ -2137,7 +2137,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_mu(
@@ -2146,7 +2146,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_mu(
@@ -2155,7 +2155,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_mu(
@@ -2164,7 +2164,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_mu(
@@ -2173,7 +2173,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_mu(
@@ -2182,7 +2182,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_mu(
@@ -2191,7 +2191,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_mu(
@@ -2200,7 +2200,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_mu(
@@ -2209,7 +2209,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_mu(
@@ -2218,7 +2218,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_mu(
@@ -2227,7 +2227,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_mu(
@@ -2236,7 +2236,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_mu(
@@ -2245,7 +2245,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_mu(
@@ -2254,7 +2254,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_mu(
@@ -2263,7 +2263,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_mu(
@@ -2272,7 +2272,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_mu(
@@ -2281,7 +2281,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_mu(
@@ -2290,7 +2290,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_mu(
@@ -2299,7 +2299,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_mu(
@@ -2308,7 +2308,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_mu(
@@ -2317,7 +2317,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_mu(
@@ -2326,7 +2326,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_mu(
@@ -2335,7 +2335,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_mu(
@@ -2344,7 +2344,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_mu(
@@ -2353,7 +2353,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_mu(
@@ -2362,7 +2362,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_mu(
@@ -2371,7 +2371,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_mu(
@@ -2380,7 +2380,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_mu(
@@ -2389,7 +2389,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_mu(
@@ -2398,7 +2398,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_mu(
@@ -2407,7 +2407,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_mu(
@@ -2416,7 +2416,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_mu(
@@ -2425,7 +2425,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_mu(
@@ -2434,7 +2434,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_mu(
@@ -2443,7 +2443,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_mu(
@@ -2452,7 +2452,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_mu(
@@ -2461,7 +2461,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_mu(
@@ -2470,7 +2470,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_mu(
@@ -2479,7 +2479,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_mu(
@@ -2488,7 +2488,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_mu(
@@ -2497,7 +2497,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_mu(
@@ -2506,7 +2506,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_mu(
@@ -2515,7 +2515,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_mu(
@@ -2524,7 +2524,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_mu(
@@ -2533,7 +2533,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_mu(
@@ -2542,7 +2542,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_mu(
@@ -2551,7 +2551,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_mu(
@@ -2560,7 +2560,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_mu(
@@ -2569,7 +2569,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_mu(
@@ -2578,7 +2578,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_mu(
@@ -2587,7 +2587,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_mu(
@@ -2596,7 +2596,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_mu(
@@ -2605,7 +2605,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_mu(
@@ -2614,7 +2614,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_mu(
@@ -2623,7 +2623,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_mu(
@@ -2632,7 +2632,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_mu(
@@ -2641,7 +2641,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_mu(
@@ -2650,7 +2650,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_mu(
@@ -2659,7 +2659,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_mu(
@@ -2668,7 +2668,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_mu(
@@ -2677,7 +2677,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_mu(
@@ -2686,7 +2686,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_mu(
@@ -2695,7 +2695,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_mu(
@@ -2704,6 +2704,6 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vfwcvt_f_mu(mask, maskedoff, src, vl);
+ return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}
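As a minimal usage sketch of the renamed overloaded API (assuming <riscv_vector.h> and a toolchain carrying this patch; the helper name widen_f16_to_f32 is hypothetical, while the call itself mirrors the test bodies above), a caller of the overloaded tumu variant now writes:

#include <riscv_vector.h>

// Sketch: masked widening f16 -> f32 convert, tail-undisturbed /
// mask-undisturbed. The overload is resolved from the argument types;
// the only change from this patch is the __riscv_ prefix on the call.
vfloat32m1_t widen_f16_to_f32(vbool32_t mask, vfloat32m1_t maskedoff,
                              vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl); // was vfwcvt_f_tumu
}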
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc.c
index 795b3173ba92..183e69450591 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_tu(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_tu(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_tu(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_tu(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_tu(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_tu(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_tu(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_tu(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_tu(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tu(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_tu(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_tu(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_tu(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_tu(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_tum(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_tum(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_tum(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_tum(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_tum(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_tum(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_tum(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tum(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tum(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_tum(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_tum(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_tum(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_tum(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_tum(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_tum(
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_tumu(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_tumu(
@@ -364,7 +364,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_tumu(
@@ -373,7 +373,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_tumu(
@@ -382,7 +382,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_tumu(
@@ -391,7 +391,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_tumu(
@@ -400,7 +400,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_tumu(
@@ -409,7 +409,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_tumu(
@@ -418,7 +418,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tumu(
@@ -427,7 +427,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tumu(
@@ -436,7 +436,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_tumu(
@@ -445,7 +445,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_tumu(
@@ -454,7 +454,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_tumu(
@@ -463,7 +463,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_tumu(
@@ -472,7 +472,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_tumu(
@@ -481,7 +481,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_tumu(
@@ -490,7 +490,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_mu(
@@ -499,7 +499,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_mu(
@@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_mu(
@@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_mu(
@@ -526,7 +526,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_mu(
@@ -535,7 +535,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_mu(
@@ -544,7 +544,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_mu(
@@ -553,7 +553,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_mu(
@@ -562,7 +562,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_mu(
@@ -571,7 +571,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_mu(
@@ -580,7 +580,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_mu(
@@ -589,7 +589,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_mu(
@@ -598,7 +598,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_mu(
@@ -607,7 +607,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_mu(
@@ -616,7 +616,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_mu(
@@ -625,7 +625,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_mu(
@@ -634,7 +634,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_mu(
@@ -643,7 +643,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_mu(
@@ -652,6 +652,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac.c
index 6c4ccb8f459c..a74ed72cf877 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_tu(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_tu(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_tu(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_tu(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_tu(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_tu(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_tu(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_tu(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_tu(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tu(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_tu(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_tu(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_tu(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_tu(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_tum(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_tum(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_tum(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_tum(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_tum(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_tum(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_tum(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tum(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tum(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_tum(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_tum(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_tum(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_tum(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_tum(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_tum(
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_tumu(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_tumu(
@@ -364,7 +364,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_tumu(
@@ -373,7 +373,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_tumu(
@@ -382,7 +382,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_tumu(
@@ -391,7 +391,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_tumu(
@@ -400,7 +400,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_tumu(
@@ -409,7 +409,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_tumu(
@@ -418,7 +418,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tumu(
@@ -427,7 +427,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tumu(
@@ -436,7 +436,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_tumu(
@@ -445,7 +445,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_tumu(
@@ -454,7 +454,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_tumu(
@@ -463,7 +463,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_tumu(
@@ -472,7 +472,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_tumu(
@@ -481,7 +481,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_tumu(
@@ -490,7 +490,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_mu(
@@ -499,7 +499,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_mu(
@@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_mu(
@@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_mu(
@@ -526,7 +526,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_mu(
@@ -535,7 +535,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_mu(
@@ -544,7 +544,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_mu(
@@ -553,7 +553,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_mu(
@@ -562,7 +562,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_mu(
@@ -571,7 +571,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_mu(
@@ -580,7 +580,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_mu(
@@ -589,7 +589,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_mu(
@@ -598,7 +598,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_mu(
@@ -607,7 +607,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_mu(
@@ -616,7 +616,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_mu(
@@ -625,7 +625,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_mu(
@@ -634,7 +634,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_mu(
@@ -643,7 +643,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_mu(
@@ -652,6 +652,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul.c
index f607af4ddbd8..d8a42ef3634d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_tu(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_tu(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_tu(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_tu(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_tu(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_tu(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_tu(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_tu(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_tu(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tu(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_tu(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_tu(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_tu(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_tu(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_tum(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_tum(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_tum(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_tum(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_tum(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_tum(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_tum(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tum(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tum(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_tum(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_tum(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_tum(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_tum(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_tum(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_tum(
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_tumu(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_tumu(
@@ -364,7 +364,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_tumu(
@@ -373,7 +373,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_tumu(
@@ -382,7 +382,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_tumu(
@@ -391,7 +391,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_tumu(
@@ -400,7 +400,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_tumu(
@@ -409,7 +409,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_tumu(
@@ -418,7 +418,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tumu(
@@ -427,7 +427,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tumu(
@@ -436,7 +436,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_tumu(
@@ -445,7 +445,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_tumu(
@@ -454,7 +454,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_tumu(
@@ -463,7 +463,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_tumu(
@@ -472,7 +472,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_tumu(
@@ -481,7 +481,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_tumu(
@@ -490,7 +490,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_mu(
@@ -499,7 +499,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_mu(
@@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_mu(
@@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_mu(
@@ -526,7 +526,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_mu(
@@ -535,7 +535,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_mu(
@@ -544,7 +544,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_mu(
@@ -553,7 +553,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_mu(
@@ -562,7 +562,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_mu(
@@ -571,7 +571,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_mu(
@@ -580,7 +580,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_mu(
@@ -589,7 +589,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_mu(
@@ -598,7 +598,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_mu(
@@ -607,7 +607,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_mu(
@@ -616,7 +616,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_mu(
@@ -625,7 +625,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_mu(
@@ -634,7 +634,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_mu(
@@ -643,7 +643,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_mu(
@@ -652,6 +652,6 @@ vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl);
}
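// Illustrative usage (not part of the patch): a minimal sketch of calling the
// renamed overloaded intrinsic above. The helper name widen_mul_tu and the
// choice of f32m1 operands are assumptions for illustration; the
// __riscv_vfwmul_tu call and its (maskedoff, op1, op2, vl) argument order are
// taken from the tests in this file.
#include <riscv_vector.h>

// Tail-undisturbed widening multiply: f32m1 x f32m1 -> f64m2. Elements past
// vl keep the value of maskedoff.
vfloat64m2_t widen_mul_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1,
                          vfloat32m1_t op2, size_t vl) {
  return __riscv_vfwmul_tu(maskedoff, op1, op2, vl);
}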
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc.c
index 514473d2d27e..59a26917d056 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_tu(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_tu(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_tu(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_tu(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_tu(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_tu(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_tu(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_tu(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_tu(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tu(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_tu(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_tu(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_tu(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_tu(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_tum(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_tum(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_tum(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_tum(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_tum(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_tum(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_tum(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tum(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tum(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_tum(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_tum(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_tum(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_tum(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_tum(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_tum(
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_tumu(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Fl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_tumu(
@@ -364,7 +364,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_tumu(
@@ -373,7 +373,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_tumu(
@@ -382,7 +382,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_tumu(
@@ -391,7 +391,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_tumu(
@@ -400,7 +400,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_tumu(
@@ -409,7 +409,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_tumu(
@@ -418,7 +418,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tumu(
@@ -427,7 +427,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tumu(
@@ -436,7 +436,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_tumu(
@@ -445,7 +445,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_tumu(
@@ -454,7 +454,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_tumu(
@@ -463,7 +463,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_tumu(
@@ -472,7 +472,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_tumu(
@@ -481,7 +481,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_tumu(
@@ -490,7 +490,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_mu(
@@ -499,7 +499,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_mu(
@@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_mu(
@@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_mu(
@@ -526,7 +526,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_mu(
@@ -535,7 +535,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_mu(
@@ -544,7 +544,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_mu(
@@ -553,7 +553,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_mu(
@@ -562,7 +562,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_mu(
@@ -571,7 +571,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_mu(
@@ -580,7 +580,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_mu(
@@ -589,7 +589,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_mu(
@@ -598,7 +598,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_mu(
@@ -607,7 +607,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_mu(
@@ -616,7 +616,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_mu(
@@ -625,7 +625,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_mu(
@@ -634,7 +634,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_mu(
@@ -643,7 +643,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_mu(
@@ -652,6 +652,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl);
}
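// Illustrative usage (not part of the patch): a masked, tail-undisturbed
// sketch assuming the renamed overloaded API above. The helper name
// nmacc_masked is hypothetical; the __riscv_vfwnmacc_tum call and its
// (mask, vd, vs1, vs2, vl) argument order match the tests in this file.
#include <riscv_vector.h>

// Widening negative multiply-accumulate, vd = -(vs1 * vs2) - vd for active
// elements: f32mf2 operands accumulated into f64m1, inactive and tail
// elements left undisturbed.
vfloat64m1_t nmacc_masked(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1,
                          vfloat32mf2_t vs2, size_t vl) {
  return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
}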
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac.c
index b0927189a993..103486243f7c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_tu(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_tu(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_tu(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_tu(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_tu(
@@ -58,7 +58,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_tu(
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_tu(
@@ -76,7 +76,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_tu(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_tu(
@@ -94,7 +94,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tu(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_tu(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_tu(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_tu(
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_tu(
@@ -166,7 +166,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_tum(
@@ -175,7 +175,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_tum(
@@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_tum(
@@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Flo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_tum(
@@ -202,7 +202,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_tum(
@@ -211,7 +211,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_tum(
@@ -229,7 +229,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_tum(
@@ -238,7 +238,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_tum(
@@ -247,7 +247,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_tum(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tum(
@@ -265,7 +265,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tum(
@@ -274,7 +274,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_tum(
@@ -283,7 +283,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_tum(
@@ -292,7 +292,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_tum(
@@ -301,7 +301,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_tum(
@@ -310,7 +310,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_tum(
@@ -319,7 +319,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_tum(
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_tumu(
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_tumu(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_tumu(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Fl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_tumu(
@@ -364,7 +364,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_tumu(
@@ -373,7 +373,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_tumu(
@@ -382,7 +382,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_tumu(
@@ -391,7 +391,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_tumu(
@@ -400,7 +400,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_tumu(
@@ -409,7 +409,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_tumu(
@@ -418,7 +418,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tumu(
@@ -427,7 +427,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float1
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tumu(
@@ -436,7 +436,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_tumu(
@@ -445,7 +445,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_tumu(
@@ -454,7 +454,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_tumu(
@@ -463,7 +463,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_tumu(
@@ -472,7 +472,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_tumu(
@@ -481,7 +481,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_tumu(
@@ -490,7 +490,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_mu(
@@ -499,7 +499,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_mu(
@@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_mu(
@@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Floa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_mu(
@@ -526,7 +526,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_mu(
@@ -535,7 +535,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_mu(
@@ -544,7 +544,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_mu(
@@ -553,7 +553,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_mu(
@@ -562,7 +562,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_mu(
@@ -571,7 +571,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_mu(
@@ -580,7 +580,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_mu(
@@ -589,7 +589,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_mu(
@@ -598,7 +598,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_mu(
@@ -607,7 +607,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_mu(
@@ -616,7 +616,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_mu(
@@ -625,7 +625,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_mu(
@@ -634,7 +634,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_mu(
@@ -643,7 +643,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_mu(
@@ -652,6 +652,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
- return vfwnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl);
}
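A minimal usage sketch of the renamed overloaded intrinsic exercised above — not part of the patch itself; the helper name is hypothetical, and building it assumes a toolchain flag along the lines of -march=rv64gcv_zvfh for the _Float16 vector types. The overload resolves on operand types, so only the policy suffix is spelled out:

#include <riscv_vector.h>

// Hypothetical helper: widening negative multiply-subtract-accumulate,
// vd[i] = vd[i] - vs1[i] * vs2[i], with the f16 product widened to f32.
vfloat32m1_t fwnmsac_example(vbool32_t mask, vfloat32m1_t vd,
                             vfloat16mf2_t vs1, vfloat16mf2_t vs2,
                             size_t vl) {
  // Unmasked, tail-undisturbed: tail elements keep their old vd values.
  vd = __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
  // Masked, tail-undisturbed: the same overloaded name, selected by the
  // leading vbool32_t argument plus the _tum policy suffix.
  return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl);
}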
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredosum.c
index 08261cc26073..fe898a65cd83 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredosum.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_tu(
@@ -22,7 +22,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_tu(
@@ -31,7 +31,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_tu(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_tu(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_tu(
@@ -58,7 +58,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_tu(
@@ -76,7 +76,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_tu(
@@ -85,7 +85,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_tu(
@@ -94,7 +94,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_tu(
@@ -103,7 +103,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_tum(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_tum(
@@ -121,7 +121,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_tum(
@@ -130,7 +130,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_tum(
@@ -139,7 +139,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_tum(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_tum(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum(
@@ -166,7 +166,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_tum(
@@ -175,7 +175,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t ma
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_tum(
@@ -184,7 +184,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_tum(
@@ -193,7 +193,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_tum(
@@ -202,6 +202,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl);
}
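The widening ordered reduction follows the same renaming. A small sketch of the _tu form tested in this file (hypothetical helper name, same assumed build flags as above): element 0 of `scalar` seeds the sum, element 0 of the result receives it, and the tail is taken from `maskedoff`.

#include <riscv_vector.h>

// Hypothetical helper: ordered (strictly in element order) widening sum
// of an f16 vector into an f32 scalar-in-vector accumulator.
vfloat32m1_t fwredosum_example(vfloat32m1_t maskedoff, vfloat16m1_t vector,
                               vfloat32m1_t scalar, size_t vl) {
  return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl);
}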
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredusum.c
index 91d92f925116..1245bfd9d6e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredusum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwredusum.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_tu(
@@ -22,7 +22,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_tu(
@@ -31,7 +31,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_tu(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_tu(
@@ -49,7 +49,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_tu(
@@ -58,7 +58,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_tu(
@@ -76,7 +76,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_tu(
@@ -85,7 +85,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_tu(
@@ -94,7 +94,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_tu(
@@ -103,7 +103,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_tum(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_tum(
@@ -121,7 +121,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_tum(
@@ -130,7 +130,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t ma
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_tum(
@@ -139,7 +139,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_tum(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_tum(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tum(
@@ -166,7 +166,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_tum(
@@ -175,7 +175,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t ma
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_tum(
@@ -184,7 +184,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_tum(
@@ -193,7 +193,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t mas
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_tum(
@@ -202,6 +202,6 @@ vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t mask
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}
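vfwredusum is the unordered counterpart of the reduction above: it may reassociate the additions, so under round-off its result can differ from the ordered form, which is why both intrinsics exist. A sketch of the masked _tum form (hypothetical helper name, same assumptions as the earlier sketches):

#include <riscv_vector.h>

// Hypothetical helper: unordered widening sum over the active elements
// selected by `mask`; tail elements are taken from `maskedoff`.
vfloat32m1_t fwredusum_example(vbool16_t mask, vfloat32m1_t maskedoff,
                               vfloat16m1_t vector, vfloat32m1_t scalar,
                               size_t vl) {
  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
}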
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub.c
index a27914cd97c1..2886faec0c9e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_tu(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_tu(
@@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_tu(
@@ -40,7 +40,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_tu(
@@ -49,7 +49,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_tu(
@@ -58,7 +58,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_tu(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_tu(
@@ -103,7 +103,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_tu(
@@ -112,7 +112,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_tu(
@@ -121,7 +121,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_tu(
@@ -130,7 +130,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_tu(
@@ -139,7 +139,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_tu(
@@ -148,7 +148,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_tu(
@@ -157,7 +157,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_tu(
@@ -166,7 +166,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_tu(
@@ -175,7 +175,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_tu(
@@ -184,7 +184,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tu(
@@ -193,7 +193,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, _
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tu(
@@ -202,7 +202,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwsub_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwsub_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwsub_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_tu(
@@ -256,7 +256,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwsub_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_tu(
@@ -265,7 +265,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_tu(
@@ -274,7 +274,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwsub_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_tu(
@@ -283,7 +283,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_tu(
@@ -292,7 +292,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwsub_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_tu(
@@ -301,7 +301,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_tu(
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwsub_vf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_tu(
@@ -319,7 +319,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_tu(
@@ -328,7 +328,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwsub_wf_tu(maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_tum(
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, f
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_tum(
@@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_tum(
@@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_tum(
@@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_tum(
@@ -373,7 +373,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_tum(
@@ -382,7 +382,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_tum(
@@ -391,7 +391,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_tum(
@@ -400,7 +400,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_tum(
@@ -409,7 +409,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_tum(
@@ -418,7 +418,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_tum(
@@ -427,7 +427,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_tum(
@@ -436,7 +436,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_tum(
@@ -445,7 +445,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_tum(
@@ -454,7 +454,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_tum(
@@ -463,7 +463,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_tum(
@@ -472,7 +472,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_tum(
@@ -481,7 +481,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_tum(
@@ -490,7 +490,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_tum(
@@ -499,7 +499,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_tum(
@@ -508,7 +508,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tum(
@@ -517,7 +517,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tum(
@@ -526,7 +526,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tum(
@@ -535,7 +535,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tum(
@@ -544,7 +544,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_tum(
@@ -553,7 +553,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_tum(
@@ -562,7 +562,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_tum(
@@ -571,7 +571,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_tum(
@@ -580,7 +580,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_tum(
@@ -589,7 +589,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_tum(
@@ -598,7 +598,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_tum(
@@ -607,7 +607,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_tum(
@@ -616,7 +616,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_tum(
@@ -625,7 +625,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_tum(
@@ -634,7 +634,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_tum(
@@ -643,7 +643,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_tum(
@@ -652,7 +652,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_tumu(
@@ -661,7 +661,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_tumu(
@@ -670,7 +670,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_tumu(
@@ -679,7 +679,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_tumu(
@@ -688,7 +688,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_tumu(
@@ -697,7 +697,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_tumu(
@@ -706,7 +706,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_tumu(
@@ -715,7 +715,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_tumu(
@@ -724,7 +724,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_tumu(
@@ -733,7 +733,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_tumu(
@@ -742,7 +742,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_tumu(
@@ -751,7 +751,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_tumu(
@@ -760,7 +760,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_tumu(
@@ -769,7 +769,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_tumu(
@@ -778,7 +778,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_tumu(
@@ -787,7 +787,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_tumu(
@@ -796,7 +796,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_tumu(
@@ -805,7 +805,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_tumu(
@@ -814,7 +814,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_tumu(
@@ -823,7 +823,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_tumu(
@@ -832,7 +832,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tumu(
@@ -841,7 +841,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tumu(
@@ -850,7 +850,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tumu(
@@ -859,7 +859,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tumu(
@@ -868,7 +868,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_tumu(
@@ -877,7 +877,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_tumu(
@@ -886,7 +886,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_tumu(
@@ -895,7 +895,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_tumu(
@@ -904,7 +904,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_tumu(
@@ -913,7 +913,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_tumu(
@@ -922,7 +922,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_tumu(
@@ -931,7 +931,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_tumu(
@@ -940,7 +940,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_tumu(
@@ -949,7 +949,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_tumu(
@@ -958,7 +958,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_tumu(
@@ -967,7 +967,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_tumu(
@@ -976,7 +976,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_mu(
@@ -985,7 +985,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_mu(
@@ -994,7 +994,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_mu(
@@ -1003,7 +1003,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_mu(
@@ -1012,7 +1012,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_mu(
@@ -1021,7 +1021,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_mu(
@@ -1030,7 +1030,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_mu(
@@ -1039,7 +1039,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_mu(
@@ -1048,7 +1048,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_mu(
@@ -1057,7 +1057,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_mu(
@@ -1066,7 +1066,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_mu(
@@ -1075,7 +1075,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
- return vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_mu(
@@ -1084,7 +1084,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_mu(
@@ -1093,7 +1093,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_mu(
@@ -1102,7 +1102,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_mu(
@@ -1111,7 +1111,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
- return vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_mu(
@@ -1120,7 +1120,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_mu(
@@ -1129,7 +1129,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_mu(
@@ -1138,7 +1138,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_mu(
@@ -1147,7 +1147,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
- return vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_mu(
@@ -1156,7 +1156,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
- return vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_mu(
@@ -1165,7 +1165,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_mu(
@@ -1174,7 +1174,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_mu(
@@ -1183,7 +1183,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_mu(
@@ -1192,7 +1192,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) {
- return vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_mu(
@@ -1201,7 +1201,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_mu(
@@ -1210,7 +1210,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_mu(
@@ -1219,7 +1219,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
- return vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_mu(
@@ -1228,7 +1228,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) {
- return vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_mu(
@@ -1237,7 +1237,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_mu(
@@ -1246,7 +1246,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_mu(
@@ -1255,7 +1255,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
- return vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_mu(
@@ -1264,7 +1264,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) {
- return vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_mu(
@@ -1273,7 +1273,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_mu(
@@ -1282,7 +1282,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_mu(
@@ -1291,7 +1291,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
- return vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_mu(
@@ -1300,6 +1300,6 @@ vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
- return vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vid.c
index bca3173e5fd2..43377c5c7a53 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vid.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vid_v_u8mf8_tu(vuint8mf8_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8mf4_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vid_v_u8mf8_tu(vuint8mf8_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vid_v_u8mf4_tu(vuint8mf4_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8mf2_tu(
@@ -30,7 +30,7 @@ vuint8mf4_t test_vid_v_u8mf4_tu(vuint8mf4_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vid_v_u8mf2_tu(vuint8mf2_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m1_tu(
@@ -39,7 +39,7 @@ vuint8mf2_t test_vid_v_u8mf2_tu(vuint8mf2_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vid_v_u8m1_tu(vuint8m1_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m2_tu(
@@ -48,7 +48,7 @@ vuint8m1_t test_vid_v_u8m1_tu(vuint8m1_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vid_v_u8m2_tu(vuint8m2_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m4_tu(
@@ -57,7 +57,7 @@ vuint8m2_t test_vid_v_u8m2_tu(vuint8m2_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vid_v_u8m4_tu(vuint8m4_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m8_tu(
@@ -66,7 +66,7 @@ vuint8m4_t test_vid_v_u8m4_tu(vuint8m4_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vid_v_u8m8_tu(vuint8m8_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16mf4_tu(
@@ -75,7 +75,7 @@ vuint8m8_t test_vid_v_u8m8_tu(vuint8m8_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vid_v_u16mf4_tu(vuint16mf4_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16mf2_tu(
@@ -84,7 +84,7 @@ vuint16mf4_t test_vid_v_u16mf4_tu(vuint16mf4_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vid_v_u16mf2_tu(vuint16mf2_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m1_tu(
@@ -93,7 +93,7 @@ vuint16mf2_t test_vid_v_u16mf2_tu(vuint16mf2_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vid_v_u16m1_tu(vuint16m1_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m2_tu(
@@ -102,7 +102,7 @@ vuint16m1_t test_vid_v_u16m1_tu(vuint16m1_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vid_v_u16m2_tu(vuint16m2_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m4_tu(
@@ -111,7 +111,7 @@ vuint16m2_t test_vid_v_u16m2_tu(vuint16m2_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vid_v_u16m4_tu(vuint16m4_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m8_tu(
@@ -120,7 +120,7 @@ vuint16m4_t test_vid_v_u16m4_tu(vuint16m4_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vid_v_u16m8_tu(vuint16m8_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tu(
@@ -129,7 +129,7 @@ vuint16m8_t test_vid_v_u16m8_tu(vuint16m8_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m1_tu(
@@ -138,7 +138,7 @@ vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vid_v_u32m1_tu(vuint32m1_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m2_tu(
@@ -147,7 +147,7 @@ vuint32m1_t test_vid_v_u32m1_tu(vuint32m1_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vid_v_u32m2_tu(vuint32m2_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m4_tu(
@@ -156,7 +156,7 @@ vuint32m2_t test_vid_v_u32m2_tu(vuint32m2_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vid_v_u32m4_tu(vuint32m4_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m8_tu(
@@ -165,7 +165,7 @@ vuint32m4_t test_vid_v_u32m4_tu(vuint32m4_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vid_v_u32m8_tu(vuint32m8_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m1_tu(
@@ -174,7 +174,7 @@ vuint32m8_t test_vid_v_u32m8_tu(vuint32m8_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vid_v_u64m1_tu(vuint64m1_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m2_tu(
@@ -183,7 +183,7 @@ vuint64m1_t test_vid_v_u64m1_tu(vuint64m1_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vid_v_u64m2_tu(vuint64m2_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m4_tu(
@@ -192,7 +192,7 @@ vuint64m2_t test_vid_v_u64m2_tu(vuint64m2_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vid_v_u64m4_tu(vuint64m4_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m8_tu(
@@ -201,7 +201,7 @@ vuint64m4_t test_vid_v_u64m4_tu(vuint64m4_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vid_v_u64m8_tu(vuint64m8_t maskedoff, size_t vl) {
- return vid_tu(maskedoff, vl);
+ return __riscv_vid_tu(maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8mf8_tum(
@@ -210,7 +210,7 @@ vuint64m8_t test_vid_v_u64m8_tu(vuint64m8_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vid_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8mf4_tum(
@@ -219,7 +219,7 @@ vuint8mf8_t test_vid_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vid_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8mf2_tum(
@@ -228,7 +228,7 @@ vuint8mf4_t test_vid_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vid_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m1_tum(
@@ -237,7 +237,7 @@ vuint8mf2_t test_vid_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vid_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m2_tum(
@@ -246,7 +246,7 @@ vuint8m1_t test_vid_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vid_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m4_tum(
@@ -255,7 +255,7 @@ vuint8m2_t test_vid_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vid_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m8_tum(
@@ -264,7 +264,7 @@ vuint8m4_t test_vid_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vid_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16mf4_tum(
@@ -273,7 +273,7 @@ vuint8m8_t test_vid_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vid_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16mf2_tum(
@@ -282,7 +282,7 @@ vuint16mf4_t test_vid_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vid_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m1_tum(
@@ -291,7 +291,7 @@ vuint16mf2_t test_vid_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vid_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m2_tum(
@@ -300,7 +300,7 @@ vuint16m1_t test_vid_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vid_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m4_tum(
@@ -309,7 +309,7 @@ vuint16m2_t test_vid_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vid_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m8_tum(
@@ -318,7 +318,7 @@ vuint16m4_t test_vid_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vid_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tum(
@@ -327,7 +327,7 @@ vuint16m8_t test_vid_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m1_tum(
@@ -336,7 +336,7 @@ vuint32mf2_t test_vid_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vid_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m2_tum(
@@ -345,7 +345,7 @@ vuint32m1_t test_vid_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vid_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m4_tum(
@@ -354,7 +354,7 @@ vuint32m2_t test_vid_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vid_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m8_tum(
@@ -363,7 +363,7 @@ vuint32m4_t test_vid_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vid_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m1_tum(
@@ -372,7 +372,7 @@ vuint32m8_t test_vid_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vid_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m2_tum(
@@ -381,7 +381,7 @@ vuint64m1_t test_vid_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vid_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m4_tum(
@@ -390,7 +390,7 @@ vuint64m2_t test_vid_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vid_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m8_tum(
@@ -399,7 +399,7 @@ vuint64m4_t test_vid_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vid_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) {
- return vid_tum(mask, maskedoff, vl);
+ return __riscv_vid_tum(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8mf8_tumu(
@@ -408,7 +408,7 @@ vuint64m8_t test_vid_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vid_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8mf4_tumu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vid_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vid_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8mf2_tumu(
@@ -426,7 +426,7 @@ vuint8mf4_t test_vid_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vid_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m1_tumu(
@@ -435,7 +435,7 @@ vuint8mf2_t test_vid_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vid_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m2_tumu(
@@ -444,7 +444,7 @@ vuint8m1_t test_vid_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vid_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m4_tumu(
@@ -453,7 +453,7 @@ vuint8m2_t test_vid_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vid_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u8m8_tumu(
@@ -462,7 +462,7 @@ vuint8m4_t test_vid_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vid_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16mf4_tumu(
@@ -471,7 +471,7 @@ vuint8m8_t test_vid_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vid_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16mf2_tumu(
@@ -480,7 +480,7 @@ vuint16mf4_t test_vid_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vid_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m1_tumu(
@@ -489,7 +489,7 @@ vuint16mf2_t test_vid_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vid_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m2_tumu(
@@ -498,7 +498,7 @@ vuint16m1_t test_vid_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vid_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m4_tumu(
@@ -507,7 +507,7 @@ vuint16m2_t test_vid_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vid_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u16m8_tumu(
@@ -516,7 +516,7 @@ vuint16m4_t test_vid_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vid_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tumu(
@@ -525,7 +525,7 @@ vuint16m8_t test_vid_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m1_tumu(
@@ -534,7 +534,7 @@ vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vid_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m2_tumu(
@@ -543,7 +543,7 @@ vuint32m1_t test_vid_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vid_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m4_tumu(
@@ -552,7 +552,7 @@ vuint32m2_t test_vid_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vid_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u32m8_tumu(
@@ -561,7 +561,7 @@ vuint32m4_t test_vid_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vid_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m1_tumu(
@@ -570,7 +570,7 @@ vuint32m8_t test_vid_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vid_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m2_tumu(
@@ -579,7 +579,7 @@ vuint64m1_t test_vid_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vid_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m4_tumu(
@@ -588,7 +588,7 @@ vuint64m2_t test_vid_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vid_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
// CHECK-RV64-LABEL: @test_vid_v_u64m8_tumu(
@@ -597,6 +597,6 @@ vuint64m4_t test_vid_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vid_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) {
- return vid_tumu(mask, maskedoff, vl);
+ return __riscv_vid_tumu(mask, maskedoff, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/viota.c
index 46396da815fb..985a70d84494 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/viota.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/viota.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_viota_m_u8mf8_tu(vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8mf4_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_viota_m_u8mf8_tu(vuint8mf8_t maskedoff, vbool64_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_viota_m_u8mf4_tu(vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8mf2_tu(
@@ -30,7 +30,7 @@ vuint8mf4_t test_viota_m_u8mf4_tu(vuint8mf4_t maskedoff, vbool32_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_viota_m_u8mf2_tu(vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m1_tu(
@@ -39,7 +39,7 @@ vuint8mf2_t test_viota_m_u8mf2_tu(vuint8mf2_t maskedoff, vbool16_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_viota_m_u8m1_tu(vuint8m1_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m2_tu(
@@ -48,7 +48,7 @@ vuint8m1_t test_viota_m_u8m1_tu(vuint8m1_t maskedoff, vbool8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_viota_m_u8m2_tu(vuint8m2_t maskedoff, vbool4_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m4_tu(
@@ -57,7 +57,7 @@ vuint8m2_t test_viota_m_u8m2_tu(vuint8m2_t maskedoff, vbool4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_viota_m_u8m4_tu(vuint8m4_t maskedoff, vbool2_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m8_tu(
@@ -66,7 +66,7 @@ vuint8m4_t test_viota_m_u8m4_tu(vuint8m4_t maskedoff, vbool2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_viota_m_u8m8_tu(vuint8m8_t maskedoff, vbool1_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16mf4_tu(
@@ -75,7 +75,7 @@ vuint8m8_t test_viota_m_u8m8_tu(vuint8m8_t maskedoff, vbool1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_viota_m_u16mf4_tu(vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16mf2_tu(
@@ -84,7 +84,7 @@ vuint16mf4_t test_viota_m_u16mf4_tu(vuint16mf4_t maskedoff, vbool64_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_viota_m_u16mf2_tu(vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m1_tu(
@@ -93,7 +93,7 @@ vuint16mf2_t test_viota_m_u16mf2_tu(vuint16mf2_t maskedoff, vbool32_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_viota_m_u16m1_tu(vuint16m1_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m2_tu(
@@ -102,7 +102,7 @@ vuint16m1_t test_viota_m_u16m1_tu(vuint16m1_t maskedoff, vbool16_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_viota_m_u16m2_tu(vuint16m2_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m4_tu(
@@ -111,7 +111,7 @@ vuint16m2_t test_viota_m_u16m2_tu(vuint16m2_t maskedoff, vbool8_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_viota_m_u16m4_tu(vuint16m4_t maskedoff, vbool4_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m8_tu(
@@ -120,7 +120,7 @@ vuint16m4_t test_viota_m_u16m4_tu(vuint16m4_t maskedoff, vbool4_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_viota_m_u16m8_tu(vuint16m8_t maskedoff, vbool2_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tu(
@@ -129,7 +129,7 @@ vuint16m8_t test_viota_m_u16m8_tu(vuint16m8_t maskedoff, vbool2_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m1_tu(
@@ -138,7 +138,7 @@ vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t maskedoff, vbool64_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_viota_m_u32m1_tu(vuint32m1_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m2_tu(
@@ -147,7 +147,7 @@ vuint32m1_t test_viota_m_u32m1_tu(vuint32m1_t maskedoff, vbool32_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_viota_m_u32m2_tu(vuint32m2_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m4_tu(
@@ -156,7 +156,7 @@ vuint32m2_t test_viota_m_u32m2_tu(vuint32m2_t maskedoff, vbool16_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_viota_m_u32m4_tu(vuint32m4_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m8_tu(
@@ -165,7 +165,7 @@ vuint32m4_t test_viota_m_u32m4_tu(vuint32m4_t maskedoff, vbool8_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_viota_m_u32m8_tu(vuint32m8_t maskedoff, vbool4_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m1_tu(
@@ -174,7 +174,7 @@ vuint32m8_t test_viota_m_u32m8_tu(vuint32m8_t maskedoff, vbool4_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_viota_m_u64m1_tu(vuint64m1_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m2_tu(
@@ -183,7 +183,7 @@ vuint64m1_t test_viota_m_u64m1_tu(vuint64m1_t maskedoff, vbool64_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_viota_m_u64m2_tu(vuint64m2_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m4_tu(
@@ -192,7 +192,7 @@ vuint64m2_t test_viota_m_u64m2_tu(vuint64m2_t maskedoff, vbool32_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_viota_m_u64m4_tu(vuint64m4_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m8_tu(
@@ -201,7 +201,7 @@ vuint64m4_t test_viota_m_u64m4_tu(vuint64m4_t maskedoff, vbool16_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_viota_m_u64m8_tu(vuint64m8_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tu(maskedoff, op1, vl);
+ return __riscv_viota_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8mf8_tum(
@@ -210,7 +210,7 @@ vuint64m8_t test_viota_m_u64m8_tu(vuint64m8_t maskedoff, vbool8_t op1, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8mf4_tum(
@@ -219,7 +219,7 @@ vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vbool6
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8mf2_tum(
@@ -228,7 +228,7 @@ vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vbool3
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m1_tum(
@@ -237,7 +237,7 @@ vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vbool1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_viota_m_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m2_tum(
@@ -246,7 +246,7 @@ vuint8m1_t test_viota_m_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_viota_m_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m4_tum(
@@ -255,7 +255,7 @@ vuint8m2_t test_viota_m_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_viota_m_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m8_tum(
@@ -264,7 +264,7 @@ vuint8m4_t test_viota_m_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_viota_m_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16mf4_tum(
@@ -273,7 +273,7 @@ vuint8m8_t test_viota_m_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16mf2_tum(
@@ -282,7 +282,7 @@ vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vbo
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m1_tum(
@@ -291,7 +291,7 @@ vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vbo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_viota_m_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m2_tum(
@@ -300,7 +300,7 @@ vuint16m1_t test_viota_m_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vbool1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_viota_m_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m4_tum(
@@ -309,7 +309,7 @@ vuint16m2_t test_viota_m_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vbool8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_viota_m_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m8_tum(
@@ -318,7 +318,7 @@ vuint16m4_t test_viota_m_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vbool4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_viota_m_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tum(
@@ -327,7 +327,7 @@ vuint16m8_t test_viota_m_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vbool2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m1_tum(
@@ -336,7 +336,7 @@ vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vbo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_viota_m_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m2_tum(
@@ -345,7 +345,7 @@ vuint32m1_t test_viota_m_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vbool3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_viota_m_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m4_tum(
@@ -354,7 +354,7 @@ vuint32m2_t test_viota_m_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vbool1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_viota_m_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m8_tum(
@@ -363,7 +363,7 @@ vuint32m4_t test_viota_m_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vbool8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_viota_m_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m1_tum(
@@ -372,7 +372,7 @@ vuint32m8_t test_viota_m_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vbool4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_viota_m_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m2_tum(
@@ -381,7 +381,7 @@ vuint64m1_t test_viota_m_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vbool6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_viota_m_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m4_tum(
@@ -390,7 +390,7 @@ vuint64m2_t test_viota_m_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vbool3
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_viota_m_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m8_tum(
@@ -399,7 +399,7 @@ vuint64m4_t test_viota_m_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vbool1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_viota_m_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tum(mask, maskedoff, op1, vl);
+ return __riscv_viota_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8mf8_tumu(
@@ -408,7 +408,7 @@ vuint64m8_t test_viota_m_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vbool8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8mf4_tumu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vbool
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_viota_m_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8mf2_tumu(
@@ -426,7 +426,7 @@ vuint8mf4_t test_viota_m_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vbool
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m1_tumu(
@@ -435,7 +435,7 @@ vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vbool
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m2_tumu(
@@ -444,7 +444,7 @@ vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m4_tumu(
@@ -453,7 +453,7 @@ vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u8m8_tumu(
@@ -462,7 +462,7 @@ vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16mf4_tumu(
@@ -471,7 +471,7 @@ vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16mf2_tumu(
@@ -480,7 +480,7 @@ vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vb
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m1_tumu(
@@ -489,7 +489,7 @@ vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vb
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m2_tumu(
@@ -498,7 +498,7 @@ vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vbool
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_viota_m_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m4_tumu(
@@ -507,7 +507,7 @@ vuint16m2_t test_viota_m_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vbool8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u16m8_tumu(
@@ -516,7 +516,7 @@ vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vbool4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tumu(
@@ -525,7 +525,7 @@ vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vbool2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m1_tumu(
@@ -534,7 +534,7 @@ vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vb
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m2_tumu(
@@ -543,7 +543,7 @@ vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vbool
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m4_tumu(
@@ -552,7 +552,7 @@ vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vbool
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u32m8_tumu(
@@ -561,7 +561,7 @@ vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vbool8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m1_tumu(
@@ -570,7 +570,7 @@ vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vbool4
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m2_tumu(
@@ -579,7 +579,7 @@ vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vbool
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m4_tumu(
@@ -588,7 +588,7 @@ vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vbool
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_viota_m_u64m8_tumu(
@@ -597,6 +597,6 @@ vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vbool
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) {
- return viota_tumu(mask, maskedoff, op1, vl);
+ return __riscv_viota_tumu(mask, maskedoff, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16.c
index 5d2ef1d6d79f..f05b2751e7fd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_tu(
@@ -76,7 +76,7 @@ vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m1_tu(
@@ -85,7 +85,7 @@ vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vle16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m2_tu(
@@ -94,7 +94,7 @@ vint16m1_t test_vle16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vle16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m4_tu(
@@ -103,7 +103,7 @@ vint16m2_t test_vle16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vle16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m8_tu(
@@ -112,7 +112,7 @@ vint16m4_t test_vle16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vle16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_tu(
@@ -121,7 +121,7 @@ vint16m8_t test_vle16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_tu(
@@ -130,7 +130,7 @@ vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m1_tu(
@@ -139,7 +139,7 @@ vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m2_tu(
@@ -148,7 +148,7 @@ vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m4_tu(
@@ -157,7 +157,7 @@ vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m8_tu(
@@ -166,7 +166,7 @@ vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tu(maskedoff, base, vl);
+ return __riscv_vle16_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_tum(
@@ -175,7 +175,7 @@ vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_tum(
@@ -184,7 +184,7 @@ vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m1_tum(
@@ -193,7 +193,7 @@ vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m2_tum(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m4_tum(
@@ -211,7 +211,7 @@ vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vle16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m8_tum(
@@ -220,7 +220,7 @@ vfloat16m4_t test_vle16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_tum(
@@ -229,7 +229,7 @@ vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_tum(
@@ -238,7 +238,7 @@ vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m1_tum(
@@ -247,7 +247,7 @@ vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vle16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m2_tum(
@@ -256,7 +256,7 @@ vint16m1_t test_vle16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vle16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m4_tum(
@@ -265,7 +265,7 @@ vint16m2_t test_vle16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vle16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m8_tum(
@@ -274,7 +274,7 @@ vint16m4_t test_vle16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vle16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_tum(
@@ -283,7 +283,7 @@ vint16m8_t test_vle16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vle16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_tum(
@@ -292,7 +292,7 @@ vuint16mf4_t test_vle16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m1_tum(
@@ -301,7 +301,7 @@ vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m2_tum(
@@ -310,7 +310,7 @@ vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m4_tum(
@@ -319,7 +319,7 @@ vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m8_tum(
@@ -328,7 +328,7 @@ vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tum(mask, maskedoff, base, vl);
+ return __riscv_vle16_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_tumu(
@@ -337,7 +337,7 @@ vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_tumu(
@@ -346,7 +346,7 @@ vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m1_tumu(
@@ -355,7 +355,7 @@ vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m2_tumu(
@@ -364,7 +364,7 @@ vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m4_tumu(
@@ -373,7 +373,7 @@ vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vle16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_f16m8_tumu(
@@ -382,7 +382,7 @@ vfloat16m4_t test_vle16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_tumu(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_tumu(
@@ -400,7 +400,7 @@ vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m1_tumu(
@@ -409,7 +409,7 @@ vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m2_tumu(
@@ -418,7 +418,7 @@ vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m4_tumu(
@@ -427,7 +427,7 @@ vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_i16m8_tumu(
@@ -436,7 +436,7 @@ vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_tumu(
@@ -445,7 +445,7 @@ vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_tumu(
@@ -454,7 +454,7 @@ vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m1_tumu(
@@ -463,7 +463,7 @@ vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m2_tumu(
@@ -472,7 +472,7 @@ vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m4_tumu(
@@ -481,7 +481,7 @@ vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle16_v_u16m8_tumu(
@@ -490,6 +490,6 @@ vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) {
- return vle16_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle16_tumu(mask, maskedoff, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16ff.c
index 653f9c85e339..8db2306c1ae0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16ff.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP1]]
//
vfloat16mf4_t test_vle16ff_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_tu(
@@ -28,7 +28,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP1]]
//
vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP1]]
//
vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_tu(
@@ -52,7 +52,7 @@ vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_tu(
@@ -64,7 +64,7 @@ vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP1]]
//
vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_tu(
@@ -76,7 +76,7 @@ vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
//
vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_tu(
@@ -88,7 +88,7 @@ vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_tu(
@@ -100,7 +100,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_tu(
@@ -112,7 +112,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_tu(
@@ -124,7 +124,7 @@ vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_tu(
@@ -136,7 +136,7 @@ vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_tu(
@@ -148,7 +148,7 @@ vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_tu(
@@ -160,7 +160,7 @@ vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_tu(
@@ -172,7 +172,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_tu(
@@ -184,7 +184,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *ba
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_tu(
@@ -196,7 +196,7 @@ vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_tu(
@@ -208,7 +208,7 @@ vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_tu(
@@ -220,7 +220,7 @@ vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_tum(
@@ -232,7 +232,7 @@ vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP1]]
//
vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_tum(
@@ -244,7 +244,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP1]]
//
vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_tum(
@@ -256,7 +256,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP1]]
//
vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_tum(
@@ -268,7 +268,7 @@ vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_tum(
@@ -280,7 +280,7 @@ vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP1]]
//
vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_tum(
@@ -292,7 +292,7 @@ vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
//
vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_tum(
@@ -304,7 +304,7 @@ vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_tum(
@@ -316,7 +316,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_tum(
@@ -328,7 +328,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_tum(
@@ -340,7 +340,7 @@ vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_tum(
@@ -352,7 +352,7 @@ vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_tum(
@@ -364,7 +364,7 @@ vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_tum(
@@ -376,7 +376,7 @@ vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_tum(
@@ -388,7 +388,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_tum(
@@ -400,7 +400,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_tum(
@@ -412,7 +412,7 @@ vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_tum(
@@ -424,7 +424,7 @@ vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_tum(
@@ -436,7 +436,7 @@ vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_tumu(
@@ -448,7 +448,7 @@ vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP1]]
//
vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_tumu(
@@ -460,7 +460,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP1]]
//
vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_tumu(
@@ -472,7 +472,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP1]]
//
vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_tumu(
@@ -484,7 +484,7 @@ vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_tumu(
@@ -496,7 +496,7 @@ vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP1]]
//
vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_tumu(
@@ -508,7 +508,7 @@ vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
//
vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_tumu(
@@ -520,7 +520,7 @@ vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_tumu(
@@ -532,7 +532,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_tumu(
@@ -544,7 +544,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_tumu(
@@ -556,7 +556,7 @@ vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vint16m2_t test_vle16ff_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_tumu(
@@ -568,7 +568,7 @@ vint16m2_t test_vle16ff_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_tumu(
@@ -580,7 +580,7 @@ vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_tumu(
@@ -592,7 +592,7 @@ vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_tumu(
@@ -604,7 +604,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_tumu(
@@ -616,7 +616,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_tumu(
@@ -628,7 +628,7 @@ vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_tumu(
@@ -640,7 +640,7 @@ vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_tumu(
@@ -652,6 +652,6 @@ vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32.c
index 71a7b860f5ad..57e8ed30d3e6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m1_tu(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m2_tu(
@@ -31,7 +31,7 @@ vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, si
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m4_tu(
@@ -40,7 +40,7 @@ vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, si
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m8_tu(
@@ -49,7 +49,7 @@ vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, si
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_tu(
@@ -58,7 +58,7 @@ vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m1_tu(
@@ -67,7 +67,7 @@ vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vle32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m2_tu(
@@ -76,7 +76,7 @@ vint32m1_t test_vle32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vle32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m4_tu(
@@ -85,7 +85,7 @@ vint32m2_t test_vle32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vle32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m8_tu(
@@ -94,7 +94,7 @@ vint32m4_t test_vle32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vle32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_tu(
@@ -103,7 +103,7 @@ vint32m8_t test_vle32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m1_tu(
@@ -112,7 +112,7 @@ vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m2_tu(
@@ -121,7 +121,7 @@ vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m4_tu(
@@ -130,7 +130,7 @@ vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m8_tu(
@@ -139,7 +139,7 @@ vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tu(maskedoff, base, vl);
+ return __riscv_vle32_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32mf2_tum(
@@ -148,7 +148,7 @@ vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m1_tum(
@@ -157,7 +157,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m2_tum(
@@ -166,7 +166,7 @@ vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m4_tum(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m8_tum(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_tum(
@@ -193,7 +193,7 @@ vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vle32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m1_tum(
@@ -202,7 +202,7 @@ vint32mf2_t test_vle32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vle32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m2_tum(
@@ -211,7 +211,7 @@ vint32m1_t test_vle32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vle32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m4_tum(
@@ -220,7 +220,7 @@ vint32m2_t test_vle32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vle32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m8_tum(
@@ -229,7 +229,7 @@ vint32m4_t test_vle32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vle32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_tum(
@@ -238,7 +238,7 @@ vint32m8_t test_vle32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vle32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m1_tum(
@@ -247,7 +247,7 @@ vuint32mf2_t test_vle32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m2_tum(
@@ -256,7 +256,7 @@ vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m4_tum(
@@ -265,7 +265,7 @@ vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m8_tum(
@@ -274,7 +274,7 @@ vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vle32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tum(mask, maskedoff, base, vl);
+ return __riscv_vle32_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32mf2_tumu(
@@ -283,7 +283,7 @@ vuint32m8_t test_vle32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m1_tumu(
@@ -292,7 +292,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m2_tumu(
@@ -301,7 +301,7 @@ vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m4_tumu(
@@ -310,7 +310,7 @@ vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_f32m8_tumu(
@@ -319,7 +319,7 @@ vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_tumu(
@@ -328,7 +328,7 @@ vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m1_tumu(
@@ -337,7 +337,7 @@ vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m2_tumu(
@@ -346,7 +346,7 @@ vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m4_tumu(
@@ -355,7 +355,7 @@ vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_i32m8_tumu(
@@ -364,7 +364,7 @@ vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_tumu(
@@ -373,7 +373,7 @@ vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m1_tumu(
@@ -382,7 +382,7 @@ vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m2_tumu(
@@ -391,7 +391,7 @@ vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m4_tumu(
@@ -400,7 +400,7 @@ vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle32_v_u32m8_tumu(
@@ -409,6 +409,6 @@ vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
- return vle32_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle32_tumu(mask, maskedoff, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32ff.c
index abb2d23df8e3..03248b88acc1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32ff.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
//
vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_tu(
@@ -28,7 +28,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *bas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
//
vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_tu(
@@ -40,7 +40,7 @@ vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_tu(
@@ -52,7 +52,7 @@ vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
//
vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_tu(
@@ -64,7 +64,7 @@ vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
//
vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_tu(
@@ -76,7 +76,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_tu(
@@ -88,7 +88,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_tu(
@@ -100,7 +100,7 @@ vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vint32m2_t test_vle32ff_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_tu(
@@ -112,7 +112,7 @@ vint32m2_t test_vle32ff_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_tu(
@@ -124,7 +124,7 @@ vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_tu(
@@ -136,7 +136,7 @@ vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_tu(
@@ -148,7 +148,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_tu(
@@ -160,7 +160,7 @@ vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_tu(
@@ -172,7 +172,7 @@ vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_tu(
@@ -184,7 +184,7 @@ vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_tum(
@@ -196,7 +196,7 @@ vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
//
vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_tum(
@@ -208,7 +208,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
//
vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_tum(
@@ -220,7 +220,7 @@ vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_tum(
@@ -232,7 +232,7 @@ vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
//
vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_tum(
@@ -244,7 +244,7 @@ vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
//
vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_tum(
@@ -256,7 +256,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_tum(
@@ -268,7 +268,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_tum(
@@ -280,7 +280,7 @@ vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_tum(
@@ -292,7 +292,7 @@ vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_tum(
@@ -304,7 +304,7 @@ vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_tum(
@@ -316,7 +316,7 @@ vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_tum(
@@ -328,7 +328,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_tum(
@@ -340,7 +340,7 @@ vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_tum(
@@ -352,7 +352,7 @@ vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_tum(
@@ -364,7 +364,7 @@ vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_tumu(
@@ -376,7 +376,7 @@ vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
//
vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_tumu(
@@ -388,7 +388,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
//
vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_tumu(
@@ -400,7 +400,7 @@ vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_tumu(
@@ -412,7 +412,7 @@ vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
//
vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_tumu(
@@ -424,7 +424,7 @@ vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
//
vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_tumu(
@@ -436,7 +436,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_tumu(
@@ -448,7 +448,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_tumu(
@@ -460,7 +460,7 @@ vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_tumu(
@@ -472,7 +472,7 @@ vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_tumu(
@@ -484,7 +484,7 @@ vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vint32m8_t test_vle32ff_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_tumu(
@@ -496,7 +496,7 @@ vint32m8_t test_vle32ff_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_tumu(
@@ -508,7 +508,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_tumu(
@@ -520,7 +520,7 @@ vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_tumu(
@@ -532,7 +532,7 @@ vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_tumu(
@@ -544,6 +544,6 @@ vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
}
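The vle32ff variants are fault-only-first loads: a trap can only be taken on element 0, and the number of elements actually loaded is written back through the extra `new_vl` pointer, which is why these signatures carry one more parameter than plain vle32. A usage sketch, not part of the patch, assuming <riscv_vector.h>:

#include <riscv_vector.h>

// Fault-only-first load with tail-undisturbed policy; *new_vl reports how
// many elements were really loaded (it may be less than vl).
vfloat32m1_t ff_load_tu(vfloat32m1_t maskedoff, const float *base, size_t vl) {
  size_t new_vl;
  vfloat32m1_t v = __riscv_vle32ff_tu(maskedoff, base, &new_vl, vl);
  (void)new_vl;  // a real caller would advance `base` and loop on new_vl
  return v;
}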
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64.c
index 741c34fdabed..33b2e506fa30 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m2_tu(
@@ -22,7 +22,7 @@ vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, s
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m4_tu(
@@ -31,7 +31,7 @@ vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, s
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m8_tu(
@@ -40,7 +40,7 @@ vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, s
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m1_tu(
@@ -49,7 +49,7 @@ vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vle64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m2_tu(
@@ -58,7 +58,7 @@ vint64m1_t test_vle64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vle64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m4_tu(
@@ -67,7 +67,7 @@ vint64m2_t test_vle64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vle64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m8_tu(
@@ -76,7 +76,7 @@ vint64m4_t test_vle64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vle64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m1_tu(
@@ -85,7 +85,7 @@ vint64m8_t test_vle64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m2_tu(
@@ -94,7 +94,7 @@ vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m4_tu(
@@ -103,7 +103,7 @@ vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m8_tu(
@@ -112,7 +112,7 @@ vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tu(maskedoff, base, vl);
+ return __riscv_vle64_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m1_tum(
@@ -121,7 +121,7 @@ vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m2_tum(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m4_tum(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m8_tum(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m1_tum(
@@ -157,7 +157,7 @@ vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vle64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m2_tum(
@@ -166,7 +166,7 @@ vint64m1_t test_vle64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vle64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m4_tum(
@@ -175,7 +175,7 @@ vint64m2_t test_vle64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vle64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m8_tum(
@@ -184,7 +184,7 @@ vint64m4_t test_vle64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vle64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m1_tum(
@@ -193,7 +193,7 @@ vint64m8_t test_vle64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m2_tum(
@@ -202,7 +202,7 @@ vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vle64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m4_tum(
@@ -211,7 +211,7 @@ vuint64m2_t test_vle64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m8_tum(
@@ -220,7 +220,7 @@ vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tum(mask, maskedoff, base, vl);
+ return __riscv_vle64_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m1_tumu(
@@ -229,7 +229,7 @@ vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m2_tumu(
@@ -238,7 +238,7 @@ vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m4_tumu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_f64m8_tumu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m1_tumu(
@@ -265,7 +265,7 @@ vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m2_tumu(
@@ -274,7 +274,7 @@ vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m4_tumu(
@@ -283,7 +283,7 @@ vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_i64m8_tumu(
@@ -292,7 +292,7 @@ vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vle64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m1_tumu(
@@ -301,7 +301,7 @@ vint64m8_t test_vle64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m2_tumu(
@@ -310,7 +310,7 @@ vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m4_tumu(
@@ -319,7 +319,7 @@ vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle64_v_u64m8_tumu(
@@ -328,6 +328,6 @@ vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
- return vle64_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle64_tumu(mask, maskedoff, base, vl);
}
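Note that a single overloaded name covers every element type and LMUL; Clang resolves the concrete intrinsic from the operand types, so the rename leaves that resolution untouched. An illustrative sketch, not part of the patch, assuming <riscv_vector.h>:

#include <riscv_vector.h>

// One prefixed name, three resolutions picked from the operand types.
void vle64_tu_overloads(vfloat64m1_t fd, const double *pf,
                        vint64m2_t id, const int64_t *pi,
                        vuint64m4_t ud, const uint64_t *pu, size_t vl) {
  vfloat64m1_t a = __riscv_vle64_tu(fd, pf, vl);  // f64, LMUL=1
  vint64m2_t   b = __riscv_vle64_tu(id, pi, vl);  // i64, LMUL=2
  vuint64m4_t  c = __riscv_vle64_tu(ud, pu, vl);  // u64, LMUL=4
  (void)a; (void)b; (void)c;
}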
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64ff.c
index 53625207154c..ab9f2dc0f84f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64ff.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
//
vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_tu(
@@ -28,7 +28,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_tu(
@@ -40,7 +40,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
//
vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_tu(
@@ -52,7 +52,7 @@ vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
//
vfloat64m8_t test_vle64ff_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_tu(
@@ -64,7 +64,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_tu(
@@ -76,7 +76,7 @@ vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_tu(
@@ -88,7 +88,7 @@ vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_tu(
@@ -100,7 +100,7 @@ vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_tu(
@@ -112,7 +112,7 @@ vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_tu(
@@ -124,7 +124,7 @@ vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_tu(
@@ -136,7 +136,7 @@ vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_tu(
@@ -148,7 +148,7 @@ vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_tum(
@@ -160,7 +160,7 @@ vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
//
vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_tum(
@@ -172,7 +172,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_tum(
@@ -184,7 +184,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
//
vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_tum(
@@ -196,7 +196,7 @@ vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
//
vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_tum(
@@ -208,7 +208,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_tum(
@@ -220,7 +220,7 @@ vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_tum(
@@ -232,7 +232,7 @@ vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_tum(
@@ -244,7 +244,7 @@ vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_tum(
@@ -256,7 +256,7 @@ vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_tum(
@@ -268,7 +268,7 @@ vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_tum(
@@ -280,7 +280,7 @@ vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vuint64m4_t test_vle64ff_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_tum(
@@ -292,7 +292,7 @@ vuint64m4_t test_vle64ff_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_tumu(
@@ -304,7 +304,7 @@ vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
//
vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_tumu(
@@ -316,7 +316,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_tumu(
@@ -328,7 +328,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
//
vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_tumu(
@@ -340,7 +340,7 @@ vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
//
vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_tumu(
@@ -352,7 +352,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_tumu(
@@ -364,7 +364,7 @@ vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_tumu(
@@ -376,7 +376,7 @@ vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_tumu(
@@ -388,7 +388,7 @@ vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_tumu(
@@ -400,7 +400,7 @@ vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_tumu(
@@ -412,7 +412,7 @@ vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vuint64m2_t test_vle64ff_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_tumu(
@@ -424,7 +424,7 @@ vuint64m2_t test_vle64ff_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_tumu(
@@ -436,6 +436,6 @@ vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8.c
index f238fe444fdd..26f32acf2f94 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vle8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_tu(
@@ -22,7 +22,7 @@ vint8mf8_t test_vle8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vle8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_tu(
@@ -31,7 +31,7 @@ vint8mf4_t test_vle8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vle8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m1_tu(
@@ -40,7 +40,7 @@ vint8mf2_t test_vle8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vle8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m2_tu(
@@ -49,7 +49,7 @@ vint8m1_t test_vle8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vle8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m4_tu(
@@ -58,7 +58,7 @@ vint8m2_t test_vle8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vle8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m8_tu(
@@ -67,7 +67,7 @@ vint8m4_t test_vle8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vle8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_tu(
@@ -76,7 +76,7 @@ vint8m8_t test_vle8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_tu(
@@ -85,7 +85,7 @@ vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_tu(
@@ -94,7 +94,7 @@ vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m1_tu(
@@ -103,7 +103,7 @@ vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vle8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m2_tu(
@@ -112,7 +112,7 @@ vuint8m1_t test_vle8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vle8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m4_tu(
@@ -121,7 +121,7 @@ vuint8m2_t test_vle8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vle8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m8_tu(
@@ -130,7 +130,7 @@ vuint8m4_t test_vle8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vle8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tu(maskedoff, base, vl);
+ return __riscv_vle8_tu(maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf8_tum(
@@ -139,7 +139,7 @@ vuint8m8_t test_vle8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_tum(
@@ -148,7 +148,7 @@ vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_tum(
@@ -157,7 +157,7 @@ vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m1_tum(
@@ -166,7 +166,7 @@ vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vle8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m2_tum(
@@ -175,7 +175,7 @@ vint8m1_t test_vle8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vle8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m4_tum(
@@ -184,7 +184,7 @@ vint8m2_t test_vle8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vle8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m8_tum(
@@ -193,7 +193,7 @@ vint8m4_t test_vle8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vle8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_tum(
@@ -202,7 +202,7 @@ vint8m8_t test_vle8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_tum(
@@ -211,7 +211,7 @@ vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_tum(
@@ -220,7 +220,7 @@ vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m1_tum(
@@ -229,7 +229,7 @@ vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m2_tum(
@@ -238,7 +238,7 @@ vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m4_tum(
@@ -247,7 +247,7 @@ vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m8_tum(
@@ -256,7 +256,7 @@ vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tum(mask, maskedoff, base, vl);
+ return __riscv_vle8_tum(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf8_tumu(
@@ -265,7 +265,7 @@ vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_tumu(
@@ -274,7 +274,7 @@ vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_tumu(
@@ -283,7 +283,7 @@ vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m1_tumu(
@@ -292,7 +292,7 @@ vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m2_tumu(
@@ -301,7 +301,7 @@ vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m4_tumu(
@@ -310,7 +310,7 @@ vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_i8m8_tumu(
@@ -319,7 +319,7 @@ vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_tumu(
@@ -328,7 +328,7 @@ vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_tumu(
@@ -337,7 +337,7 @@ vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_tumu(
@@ -346,7 +346,7 @@ vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m1_tumu(
@@ -355,7 +355,7 @@ vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m2_tumu(
@@ -364,7 +364,7 @@ vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m4_tumu(
@@ -373,7 +373,7 @@ vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
// CHECK-RV64-LABEL: @test_vle8_v_u8m8_tumu(
@@ -382,6 +382,6 @@ vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) {
- return vle8_tumu(mask, maskedoff, base, vl);
+ return __riscv_vle8_tumu(mask, maskedoff, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8ff.c
index 100827da13ed..326858793ec1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8ff.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_tu(
@@ -28,7 +28,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_tu(
@@ -40,7 +40,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_tu(
@@ -52,7 +52,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_tu(
@@ -64,7 +64,7 @@ vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_tu(
@@ -76,7 +76,7 @@ vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_tu(
@@ -88,7 +88,7 @@ vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_tu(
@@ -100,7 +100,7 @@ vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_tu(
@@ -112,7 +112,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_tu(
@@ -124,7 +124,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_tu(
@@ -136,7 +136,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_tu(
@@ -148,7 +148,7 @@ vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_tu(
@@ -160,7 +160,7 @@ vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_tu(
@@ -172,7 +172,7 @@ vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tu(maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_tum(
@@ -184,7 +184,7 @@ vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_tum(
@@ -196,7 +196,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_tum(
@@ -208,7 +208,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_tum(
@@ -220,7 +220,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_tum(
@@ -232,7 +232,7 @@ vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_tum(
@@ -244,7 +244,7 @@ vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vint8m4_t test_vle8ff_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_tum(
@@ -256,7 +256,7 @@ vint8m4_t test_vle8ff_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_tum(
@@ -268,7 +268,7 @@ vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_tum(
@@ -280,7 +280,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_tum(
@@ -292,7 +292,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_tum(
@@ -304,7 +304,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_tum(
@@ -316,7 +316,7 @@ vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vuint8m2_t test_vle8ff_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_tum(
@@ -328,7 +328,7 @@ vuint8m2_t test_vle8ff_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_tum(
@@ -340,7 +340,7 @@ vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tum(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_tumu(
@@ -352,7 +352,7 @@ vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_tumu(
@@ -364,7 +364,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_tumu(
@@ -376,7 +376,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_tumu(
@@ -388,7 +388,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_tumu(
@@ -400,7 +400,7 @@ vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vint8m2_t test_vle8ff_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_tumu(
@@ -412,7 +412,7 @@ vint8m2_t test_vle8ff_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_tumu(
@@ -424,7 +424,7 @@ vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vint8m8_t test_vle8ff_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_tumu(
@@ -436,7 +436,7 @@ vint8m8_t test_vle8ff_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_tumu(
@@ -448,7 +448,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_tumu(
@@ -460,7 +460,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_tumu(
@@ -472,7 +472,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_tumu(
@@ -484,7 +484,7 @@ vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_tumu(
@@ -496,7 +496,7 @@ vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_tumu(
@@ -508,6 +508,6 @@ vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
+ return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei16.c
index 1b4262d617dd..5b9007afb032 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_tu(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_tu(
@@ -157,7 +157,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_tu(
@@ -166,7 +166,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_tu(
@@ -175,7 +175,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_tu(
@@ -184,7 +184,7 @@ vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_tu(
@@ -193,7 +193,7 @@ vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_tu(
@@ -202,7 +202,7 @@ vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_tu(
@@ -211,7 +211,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_tu(
@@ -220,7 +220,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_tu(
@@ -229,7 +229,7 @@ vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_tu(
@@ -238,7 +238,7 @@ vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_tu(
@@ -247,7 +247,7 @@ vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_tu(
@@ -256,7 +256,7 @@ vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_tu(
@@ -265,7 +265,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_tu(
@@ -274,7 +274,7 @@ vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_tu(
@@ -283,7 +283,7 @@ vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_tu(
@@ -292,7 +292,7 @@ vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_tu(
@@ -301,7 +301,7 @@ vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_tu(
@@ -310,7 +310,7 @@ vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_tu(
@@ -319,7 +319,7 @@ vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_tu(
@@ -328,7 +328,7 @@ vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_tu(
@@ -337,7 +337,7 @@ vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_tu(
@@ -346,7 +346,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_tu(
@@ -355,7 +355,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_tu(
@@ -364,7 +364,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_tu(
@@ -373,7 +373,7 @@ vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_tu(
@@ -382,7 +382,7 @@ vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_tu(
@@ -391,7 +391,7 @@ vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_tu(
@@ -400,7 +400,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_tu(
@@ -409,7 +409,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_tu(
@@ -418,7 +418,7 @@ vuint16m1_t test_vloxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_tu(
@@ -427,7 +427,7 @@ vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_tu(
@@ -436,7 +436,7 @@ vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_tu(
@@ -445,7 +445,7 @@ vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_tu(
@@ -454,7 +454,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_tu(
@@ -463,7 +463,7 @@ vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_tu(
@@ -472,7 +472,7 @@ vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_tu(
@@ -481,7 +481,7 @@ vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_tu(
@@ -490,7 +490,7 @@ vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_tu(
@@ -499,7 +499,7 @@ vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_tu(
@@ -508,7 +508,7 @@ vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_tu(
@@ -517,7 +517,7 @@ vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_tum(
@@ -526,7 +526,7 @@ vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_tum(
@@ -535,7 +535,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_tum(
@@ -544,7 +544,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_tum(
@@ -553,7 +553,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_tum(
@@ -562,7 +562,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_tum(
@@ -571,7 +571,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_tum(
@@ -580,7 +580,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_tum(
@@ -589,7 +589,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_tum(
@@ -598,7 +598,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_tum(
@@ -607,7 +607,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_tum(
@@ -616,7 +616,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_tum(
@@ -625,7 +625,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_tum(
@@ -634,7 +634,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_tum(
@@ -643,7 +643,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_tum(
@@ -652,7 +652,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_tum(
@@ -661,7 +661,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_tum(
@@ -670,7 +670,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_tum(
@@ -679,7 +679,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_tum(
@@ -688,7 +688,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_tum(
@@ -697,7 +697,7 @@ vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_tum(
@@ -706,7 +706,7 @@ vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_tum(
@@ -715,7 +715,7 @@ vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_tum(
@@ -724,7 +724,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_tum(
@@ -733,7 +733,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_tum(
@@ -742,7 +742,7 @@ vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_tum(
@@ -751,7 +751,7 @@ vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_tum(
@@ -760,7 +760,7 @@ vint16m4_t test_vloxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_tum(
@@ -769,7 +769,7 @@ vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_tum(
@@ -778,7 +778,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_tum(
@@ -787,7 +787,7 @@ vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_tum(
@@ -796,7 +796,7 @@ vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_tum(
@@ -805,7 +805,7 @@ vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_tum(
@@ -814,7 +814,7 @@ vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_tum(
@@ -823,7 +823,7 @@ vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_tum(
@@ -832,7 +832,7 @@ vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_tum(
@@ -841,7 +841,7 @@ vint64m4_t test_vloxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_tum(
@@ -850,7 +850,7 @@ vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_tum(
@@ -859,7 +859,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_tum(
@@ -868,7 +868,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_tum(
@@ -877,7 +877,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_tum(
@@ -886,7 +886,7 @@ vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_tum(
@@ -895,7 +895,7 @@ vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_tum(
@@ -904,7 +904,7 @@ vuint8m4_t test_vloxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_tum(
@@ -913,7 +913,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_tum(
@@ -922,7 +922,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_tum(
@@ -931,7 +931,7 @@ vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_tum(
@@ -940,7 +940,7 @@ vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_tum(
@@ -949,7 +949,7 @@ vuint16m4_t test_vloxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_tum(
@@ -958,7 +958,7 @@ vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_tum(
@@ -967,7 +967,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_tum(
@@ -976,7 +976,7 @@ vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_tum(
@@ -985,7 +985,7 @@ vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_tum(
@@ -994,7 +994,7 @@ vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_tum(
@@ -1003,7 +1003,7 @@ vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_tum(
@@ -1012,7 +1012,7 @@ vuint64m1_t test_vloxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_tum(
@@ -1021,7 +1021,7 @@ vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_tum(
@@ -1030,7 +1030,7 @@ vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_tumu(
@@ -1039,7 +1039,7 @@ vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_tumu(
@@ -1048,7 +1048,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_tumu(
@@ -1057,7 +1057,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_tumu(
@@ -1066,7 +1066,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_tumu(
@@ -1075,7 +1075,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_tumu(
@@ -1084,7 +1084,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_tumu(
@@ -1093,7 +1093,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_tumu(
@@ -1102,7 +1102,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_tumu(
@@ -1111,7 +1111,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_tumu(
@@ -1120,7 +1120,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_tumu(
@@ -1129,7 +1129,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_tumu(
@@ -1138,7 +1138,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_tumu(
@@ -1147,7 +1147,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_tumu(
@@ -1156,7 +1156,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_tumu(
@@ -1165,7 +1165,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_tumu(
@@ -1174,7 +1174,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_tumu(
@@ -1183,7 +1183,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_tumu(
@@ -1192,7 +1192,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_tumu(
@@ -1201,7 +1201,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_tumu(
@@ -1210,7 +1210,7 @@ vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_tumu(
@@ -1219,7 +1219,7 @@ vint8m2_t test_vloxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_tumu(
@@ -1228,7 +1228,7 @@ vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_tumu(
@@ -1237,7 +1237,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_tumu(
@@ -1246,7 +1246,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_tumu(
@@ -1255,7 +1255,7 @@ vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_tumu(
@@ -1264,7 +1264,7 @@ vint16m2_t test_vloxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_tumu(
@@ -1273,7 +1273,7 @@ vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_tumu(
@@ -1282,7 +1282,7 @@ vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_tumu(
@@ -1291,7 +1291,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_tumu(
@@ -1300,7 +1300,7 @@ vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_tumu(
@@ -1309,7 +1309,7 @@ vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_tumu(
@@ -1318,7 +1318,7 @@ vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_tumu(
@@ -1327,7 +1327,7 @@ vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_tumu(
@@ -1336,7 +1336,7 @@ vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_tumu(
@@ -1345,7 +1345,7 @@ vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_tumu(
@@ -1354,7 +1354,7 @@ vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_tumu(
@@ -1363,7 +1363,7 @@ vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_tumu(
@@ -1372,7 +1372,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_tumu(
@@ -1381,7 +1381,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_tumu(
@@ -1390,7 +1390,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_tumu(
@@ -1399,7 +1399,7 @@ vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_tumu(
@@ -1408,7 +1408,7 @@ vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_tumu(
@@ -1417,7 +1417,7 @@ vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_tumu(
@@ -1426,7 +1426,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_tumu(
@@ -1435,7 +1435,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_tumu(
@@ -1444,7 +1444,7 @@ vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_tumu(
@@ -1453,7 +1453,7 @@ vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_tumu(
@@ -1462,7 +1462,7 @@ vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_tumu(
@@ -1471,7 +1471,7 @@ vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_tumu(
@@ -1480,7 +1480,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_tumu(
@@ -1489,7 +1489,7 @@ vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_tumu(
@@ -1498,7 +1498,7 @@ vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_tumu(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_tumu(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_tumu(
@@ -1525,7 +1525,7 @@ vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_tumu(
@@ -1534,7 +1534,7 @@ vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_tumu(
@@ -1543,7 +1543,7 @@ vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_mu(
@@ -1552,7 +1552,7 @@ vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_mu(
@@ -1561,7 +1561,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_mu(
@@ -1570,7 +1570,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_mu(
@@ -1579,7 +1579,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_mu(
@@ -1588,7 +1588,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_mu(
@@ -1597,7 +1597,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_mu(
@@ -1606,7 +1606,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_mu(
@@ -1615,7 +1615,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_mu(
@@ -1624,7 +1624,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_mu(
@@ -1633,7 +1633,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_mu(
@@ -1642,7 +1642,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_mu(
@@ -1651,7 +1651,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_mu(
@@ -1660,7 +1660,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_mu(
@@ -1669,7 +1669,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_mu(
@@ -1678,7 +1678,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_mu(
@@ -1687,7 +1687,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_mu(
@@ -1696,7 +1696,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_mu(
@@ -1705,7 +1705,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_mu(
@@ -1714,7 +1714,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_mu(
@@ -1723,7 +1723,7 @@ vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_mu(
@@ -1732,7 +1732,7 @@ vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_mu(
@@ -1741,7 +1741,7 @@ vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_mu(
@@ -1750,7 +1750,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_mu(
@@ -1759,7 +1759,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_mu(
@@ -1768,7 +1768,7 @@ vint16m1_t test_vloxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_mu(
@@ -1777,7 +1777,7 @@ vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_mu(
@@ -1786,7 +1786,7 @@ vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_mu(
@@ -1795,7 +1795,7 @@ vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_mu(
@@ -1804,7 +1804,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_mu(
@@ -1813,7 +1813,7 @@ vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_mu(
@@ -1822,7 +1822,7 @@ vint32m2_t test_vloxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_mu(
@@ -1831,7 +1831,7 @@ vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_mu(
@@ -1840,7 +1840,7 @@ vint32m8_t test_vloxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_mu(
@@ -1849,7 +1849,7 @@ vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_mu(
@@ -1858,7 +1858,7 @@ vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_mu(
@@ -1867,7 +1867,7 @@ vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_mu(
@@ -1876,7 +1876,7 @@ vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_mu(
@@ -1885,7 +1885,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_mu(
@@ -1894,7 +1894,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_mu(
@@ -1903,7 +1903,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_mu(
@@ -1912,7 +1912,7 @@ vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_mu(
@@ -1921,7 +1921,7 @@ vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_mu(
@@ -1930,7 +1930,7 @@ vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_mu(
@@ -1939,7 +1939,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_mu(
@@ -1948,7 +1948,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_mu(
@@ -1957,7 +1957,7 @@ vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_mu(
@@ -1966,7 +1966,7 @@ vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_mu(
@@ -1975,7 +1975,7 @@ vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_mu(
@@ -1984,7 +1984,7 @@ vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_mu(
@@ -1993,7 +1993,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_mu(
@@ -2002,7 +2002,7 @@ vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_mu(
@@ -2011,7 +2011,7 @@ vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_mu(
@@ -2020,7 +2020,7 @@ vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_mu(
@@ -2029,7 +2029,7 @@ vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_mu(
@@ -2038,7 +2038,7 @@ vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_mu(
@@ -2047,7 +2047,7 @@ vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_mu(
@@ -2056,6 +2056,6 @@ vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei16_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei32.c
index 2b60e367898a..9c8f27c274c8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_tu(
@@ -67,7 +67,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_tu(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_tu(
@@ -85,7 +85,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_tu(
@@ -94,7 +94,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_tu(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_tu(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_tu(
@@ -121,7 +121,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_tu(
@@ -130,7 +130,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_tu(
@@ -139,7 +139,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_tu(
@@ -148,7 +148,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_tu(
@@ -157,7 +157,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_tu(
@@ -166,7 +166,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_tu(
@@ -175,7 +175,7 @@ vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_tu(
@@ -184,7 +184,7 @@ vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_tu(
@@ -193,7 +193,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_tu(
@@ -202,7 +202,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_tu(
@@ -211,7 +211,7 @@ vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_tu(
@@ -220,7 +220,7 @@ vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_tu(
@@ -229,7 +229,7 @@ vint16m4_t test_vloxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_tu(
@@ -238,7 +238,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_tu(
@@ -247,7 +247,7 @@ vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_tu(
@@ -256,7 +256,7 @@ vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_tu(
@@ -265,7 +265,7 @@ vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_tu(
@@ -274,7 +274,7 @@ vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_tu(
@@ -283,7 +283,7 @@ vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_tu(
@@ -292,7 +292,7 @@ vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_tu(
@@ -301,7 +301,7 @@ vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_tu(
@@ -310,7 +310,7 @@ vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_tu(
@@ -319,7 +319,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_tu(
@@ -328,7 +328,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_tu(
@@ -337,7 +337,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_tu(
@@ -346,7 +346,7 @@ vuint8m1_t test_vloxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_tu(
@@ -355,7 +355,7 @@ vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_tu(
@@ -364,7 +364,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_tu(
@@ -373,7 +373,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_tu(
@@ -382,7 +382,7 @@ vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_tu(
@@ -391,7 +391,7 @@ vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_tu(
@@ -400,7 +400,7 @@ vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_tu(
@@ -409,7 +409,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_tu(
@@ -418,7 +418,7 @@ vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_tu(
@@ -427,7 +427,7 @@ vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_tu(
@@ -436,7 +436,7 @@ vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_tu(
@@ -445,7 +445,7 @@ vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_tu(
@@ -454,7 +454,7 @@ vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_tu(
@@ -463,7 +463,7 @@ vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_tu(
@@ -472,7 +472,7 @@ vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_tum(
@@ -481,7 +481,7 @@ vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_tum(
@@ -490,7 +490,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_tum(
@@ -499,7 +499,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_tum(
@@ -508,7 +508,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_tum(
@@ -517,7 +517,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_tum(
@@ -526,7 +526,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_tum(
@@ -535,7 +535,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_tum(
@@ -544,7 +544,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_tum(
@@ -553,7 +553,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_tum(
@@ -562,7 +562,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_tum(
@@ -571,7 +571,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_tum(
@@ -580,7 +580,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_tum(
@@ -589,7 +589,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_tum(
@@ -598,7 +598,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_tum(
@@ -607,7 +607,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_tum(
@@ -616,7 +616,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_tum(
@@ -625,7 +625,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_tum(
@@ -634,7 +634,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_tum(
@@ -643,7 +643,7 @@ vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_tum(
@@ -652,7 +652,7 @@ vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_tum(
@@ -661,7 +661,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_tum(
@@ -670,7 +670,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_tum(
@@ -679,7 +679,7 @@ vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_tum(
@@ -688,7 +688,7 @@ vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_tum(
@@ -697,7 +697,7 @@ vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_tum(
@@ -706,7 +706,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_tum(
@@ -715,7 +715,7 @@ vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_tum(
@@ -724,7 +724,7 @@ vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_tum(
@@ -733,7 +733,7 @@ vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_tum(
@@ -742,7 +742,7 @@ vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_tum(
@@ -751,7 +751,7 @@ vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_tum(
@@ -760,7 +760,7 @@ vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_tum(
@@ -769,7 +769,7 @@ vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_tum(
@@ -778,7 +778,7 @@ vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_tum(
@@ -787,7 +787,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_tum(
@@ -796,7 +796,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_tum(
@@ -805,7 +805,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_tum(
@@ -814,7 +814,7 @@ vuint8m1_t test_vloxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_tum(
@@ -823,7 +823,7 @@ vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_tum(
@@ -832,7 +832,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_tum(
@@ -841,7 +841,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_tum(
@@ -850,7 +850,7 @@ vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_tum(
@@ -859,7 +859,7 @@ vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_tum(
@@ -868,7 +868,7 @@ vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_tum(
@@ -877,7 +877,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_tum(
@@ -886,7 +886,7 @@ vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_tum(
@@ -895,7 +895,7 @@ vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_tum(
@@ -904,7 +904,7 @@ vuint32m4_t test_vloxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_tum(
@@ -913,7 +913,7 @@ vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_tum(
@@ -922,7 +922,7 @@ vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_tum(
@@ -931,7 +931,7 @@ vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_tum(
@@ -940,7 +940,7 @@ vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_tumu(
@@ -949,7 +949,7 @@ vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_tumu(
@@ -958,7 +958,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_tumu(
@@ -967,7 +967,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_tumu(
@@ -976,7 +976,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_tumu(
@@ -985,7 +985,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_tumu(
@@ -994,7 +994,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_tumu(
@@ -1003,7 +1003,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_tumu(
@@ -1012,7 +1012,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_tumu(
@@ -1021,7 +1021,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_tumu(
@@ -1030,7 +1030,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_tumu(
@@ -1039,7 +1039,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_tumu(
@@ -1048,7 +1048,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_tumu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_tumu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_tumu(
@@ -1075,7 +1075,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_tumu(
@@ -1084,7 +1084,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_tumu(
@@ -1093,7 +1093,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_tumu(
@@ -1102,7 +1102,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_tumu(
@@ -1111,7 +1111,7 @@ vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_tumu(
@@ -1120,7 +1120,7 @@ vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_tumu(
@@ -1129,7 +1129,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_tumu(
@@ -1138,7 +1138,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_tumu(
@@ -1147,7 +1147,7 @@ vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_tumu(
@@ -1156,7 +1156,7 @@ vint16m2_t test_vloxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_tumu(
@@ -1165,7 +1165,7 @@ vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_tumu(
@@ -1174,7 +1174,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_tumu(
@@ -1183,7 +1183,7 @@ vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_tumu(
@@ -1192,7 +1192,7 @@ vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_tumu(
@@ -1201,7 +1201,7 @@ vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_tumu(
@@ -1210,7 +1210,7 @@ vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_tumu(
@@ -1219,7 +1219,7 @@ vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_tumu(
@@ -1228,7 +1228,7 @@ vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_tumu(
@@ -1237,7 +1237,7 @@ vint64m4_t test_vloxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_tumu(
@@ -1246,7 +1246,7 @@ vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_tumu(
@@ -1255,7 +1255,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_tumu(
@@ -1264,7 +1264,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_tumu(
@@ -1273,7 +1273,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_tumu(
@@ -1282,7 +1282,7 @@ vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_tumu(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_tumu(
@@ -1300,7 +1300,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_tumu(
@@ -1309,7 +1309,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_tumu(
@@ -1318,7 +1318,7 @@ vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_tumu(
@@ -1327,7 +1327,7 @@ vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_tumu(
@@ -1336,7 +1336,7 @@ vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_tumu(
@@ -1345,7 +1345,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_tumu(
@@ -1354,7 +1354,7 @@ vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_tumu(
@@ -1363,7 +1363,7 @@ vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_tumu(
@@ -1372,7 +1372,7 @@ vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_tumu(
@@ -1381,7 +1381,7 @@ vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_tumu(
@@ -1390,7 +1390,7 @@ vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_tumu(
@@ -1399,7 +1399,7 @@ vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_tumu(
@@ -1408,7 +1408,7 @@ vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_mu(
@@ -1417,7 +1417,7 @@ vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_mu(
@@ -1426,7 +1426,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_mu(
@@ -1435,7 +1435,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_mu(
@@ -1444,7 +1444,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_mu(
@@ -1453,7 +1453,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_mu(
@@ -1462,7 +1462,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_mu(
@@ -1471,7 +1471,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_mu(
@@ -1480,7 +1480,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_mu(
@@ -1489,7 +1489,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_mu(
@@ -1498,7 +1498,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_mu(
@@ -1507,7 +1507,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_mu(
@@ -1516,7 +1516,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_mu(
@@ -1525,7 +1525,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_mu(
@@ -1534,7 +1534,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_mu(
@@ -1543,7 +1543,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_mu(
@@ -1552,7 +1552,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_mu(
@@ -1561,7 +1561,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_mu(
@@ -1570,7 +1570,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_mu(
@@ -1579,7 +1579,7 @@ vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_mu(
@@ -1588,7 +1588,7 @@ vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_mu(
@@ -1597,7 +1597,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_mu(
@@ -1606,7 +1606,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_mu(
@@ -1615,7 +1615,7 @@ vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_mu(
@@ -1624,7 +1624,7 @@ vint16m2_t test_vloxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_mu(
@@ -1633,7 +1633,7 @@ vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_mu(
@@ -1642,7 +1642,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_mu(
@@ -1651,7 +1651,7 @@ vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_mu(
@@ -1660,7 +1660,7 @@ vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_mu(
@@ -1669,7 +1669,7 @@ vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_mu(
@@ -1678,7 +1678,7 @@ vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_mu(
@@ -1687,7 +1687,7 @@ vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_mu(
@@ -1696,7 +1696,7 @@ vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_mu(
@@ -1705,7 +1705,7 @@ vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_mu(
@@ -1714,7 +1714,7 @@ vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_mu(
@@ -1723,7 +1723,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_mu(
@@ -1732,7 +1732,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_mu(
@@ -1741,7 +1741,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_mu(
@@ -1750,7 +1750,7 @@ vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_mu(
@@ -1759,7 +1759,7 @@ vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_mu(
@@ -1768,7 +1768,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_mu(
@@ -1777,7 +1777,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_mu(
@@ -1786,7 +1786,7 @@ vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_mu(
@@ -1795,7 +1795,7 @@ vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_mu(
@@ -1804,7 +1804,7 @@ vuint16m4_t test_vloxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_mu(
@@ -1813,7 +1813,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_mu(
@@ -1822,7 +1822,7 @@ vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_mu(
@@ -1831,7 +1831,7 @@ vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_mu(
@@ -1840,7 +1840,7 @@ vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_mu(
@@ -1849,7 +1849,7 @@ vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_mu(
@@ -1858,7 +1858,7 @@ vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_mu(
@@ -1867,7 +1867,7 @@ vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_mu(
@@ -1876,6 +1876,6 @@ vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei32_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
}
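For reference, a minimal sketch of calling the renamed overloaded intrinsic from user code; it is not part of this patch. The wrapper name `gather_u32` and the variable names are hypothetical, and building it assumes a vector-enabled target (e.g. -march=rv64gcv); the call shape follows the `_mu` tests above.

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// Indexed gather of 32-bit unsigned elements via the overloaded,
// mask-undisturbed (_mu) form: masked-off lanes keep `maskedoff`,
// and the tail is left agnostic. (Hypothetical wrapper, not from the patch.)
vuint32m1_t gather_u32(vbool32_t mask, vuint32m1_t maskedoff,
                       const uint32_t *buf, vuint32m1_t idx, size_t vl) {
  return __riscv_vloxei32_mu(mask, maskedoff, buf, idx, vl);
}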
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei64.c
index d562b7fbad1a..2d247f0bb97d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_tu(
@@ -58,7 +58,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_tu(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_tu(
@@ -76,7 +76,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_tu(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_tu(
@@ -94,7 +94,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_tu(
@@ -103,7 +103,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_tu(
@@ -112,7 +112,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_tu(
@@ -121,7 +121,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_tu(
@@ -130,7 +130,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_tu(
@@ -139,7 +139,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_tu(
@@ -148,7 +148,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_tu(
@@ -157,7 +157,7 @@ vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_tu(
@@ -166,7 +166,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_tu(
@@ -175,7 +175,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_tu(
@@ -184,7 +184,7 @@ vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_tu(
@@ -193,7 +193,7 @@ vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_tu(
@@ -202,7 +202,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_tu(
@@ -211,7 +211,7 @@ vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_tu(
@@ -220,7 +220,7 @@ vint32m2_t test_vloxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_tu(
@@ -229,7 +229,7 @@ vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_tu(
@@ -238,7 +238,7 @@ vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_tu(
@@ -247,7 +247,7 @@ vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_tu(
@@ -256,7 +256,7 @@ vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_tu(
@@ -265,7 +265,7 @@ vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_tu(
@@ -274,7 +274,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_tu(
@@ -283,7 +283,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_tu(
@@ -292,7 +292,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_tu(
@@ -301,7 +301,7 @@ vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_tu(
@@ -310,7 +310,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_tu(
@@ -319,7 +319,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_tu(
@@ -328,7 +328,7 @@ vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_tu(
@@ -337,7 +337,7 @@ vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_tu(
@@ -346,7 +346,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_tu(
@@ -355,7 +355,7 @@ vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_tu(
@@ -364,7 +364,7 @@ vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_tu(
@@ -373,7 +373,7 @@ vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_tu(
@@ -382,7 +382,7 @@ vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_tu(
@@ -391,7 +391,7 @@ vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_tu(
@@ -400,7 +400,7 @@ vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
}
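The hunks that follow rename the masked policy variants of the same load. Per the rvv-intrinsic-doc naming scheme, `_tu` is unmasked with the tail taken from `maskedoff`, `_tum` adds a mask with agnostic inactive lanes, `_tumu` additionally keeps inactive lanes, and `_mu` keeps inactive lanes with an agnostic tail. A hedged sketch with hypothetical names (`load_tum`, `load_tumu`), not part of this patch; all masked variants share the (mask, maskedoff, base, bindex, vl) shape seen below.

#include <stddef.h>
#include <riscv_vector.h>

// Tail-undisturbed, mask-agnostic: tail lanes keep `maskedoff`,
// masked-off lanes are unspecified. (Hypothetical wrapper.)
vfloat64m1_t load_tum(vbool64_t mask, vfloat64m1_t maskedoff,
                      const double *base, vuint64m1_t bindex, size_t vl) {
  return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}

// Tail-undisturbed, mask-undisturbed: tail and masked-off lanes both
// keep `maskedoff`. (Hypothetical wrapper.)
vfloat64m1_t load_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
                       const double *base, vuint64m1_t bindex, size_t vl) {
  return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}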
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_tum(
@@ -409,7 +409,7 @@ vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_tum(
@@ -418,7 +418,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_tum(
@@ -427,7 +427,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_tum(
@@ -436,7 +436,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_tum(
@@ -445,7 +445,7 @@ vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_tum(
@@ -454,7 +454,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_tum(
@@ -463,7 +463,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_tum(
@@ -472,7 +472,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_tum(
@@ -499,7 +499,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_tum(
@@ -508,7 +508,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_tum(
@@ -517,7 +517,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_tum(
@@ -526,7 +526,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_tum(
@@ -535,7 +535,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_tum(
@@ -544,7 +544,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_tum(
@@ -553,7 +553,7 @@ vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_tum(
@@ -562,7 +562,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_tum(
@@ -571,7 +571,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_tum(
@@ -580,7 +580,7 @@ vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_tum(
@@ -589,7 +589,7 @@ vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_tum(
@@ -598,7 +598,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_tum(
@@ -607,7 +607,7 @@ vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_tum(
@@ -616,7 +616,7 @@ vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_tum(
@@ -625,7 +625,7 @@ vint32m4_t test_vloxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_tum(
@@ -634,7 +634,7 @@ vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_tum(
@@ -643,7 +643,7 @@ vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_tum(
@@ -652,7 +652,7 @@ vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_tum(
@@ -661,7 +661,7 @@ vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_tum(
@@ -670,7 +670,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_tum(
@@ -679,7 +679,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_tum(
@@ -688,7 +688,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_tum(
@@ -697,7 +697,7 @@ vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_tum(
@@ -706,7 +706,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_tum(
@@ -715,7 +715,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_tum(
@@ -724,7 +724,7 @@ vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_tum(
@@ -733,7 +733,7 @@ vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_tum(
@@ -742,7 +742,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_tum(
@@ -751,7 +751,7 @@ vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_tum(
@@ -760,7 +760,7 @@ vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_tum(
@@ -769,7 +769,7 @@ vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_tum(
@@ -778,7 +778,7 @@ vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_tum(
@@ -787,7 +787,7 @@ vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_tum(
@@ -796,7 +796,7 @@ vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_tumu(
@@ -805,7 +805,7 @@ vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_tumu(
@@ -814,7 +814,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_tumu(
@@ -823,7 +823,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_tumu(
@@ -832,7 +832,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_tumu(
@@ -841,7 +841,7 @@ vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_tumu(
@@ -850,7 +850,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_tumu(
@@ -859,7 +859,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_tumu(
@@ -868,7 +868,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_tumu(
@@ -877,7 +877,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_tumu(
@@ -886,7 +886,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_tumu(
@@ -895,7 +895,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_tumu(
@@ -904,7 +904,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_tumu(
@@ -913,7 +913,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_tumu(
@@ -922,7 +922,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_tumu(
@@ -931,7 +931,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_tumu(
@@ -940,7 +940,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_tumu(
@@ -949,7 +949,7 @@ vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_tumu(
@@ -958,7 +958,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_tumu(
@@ -967,7 +967,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_tumu(
@@ -976,7 +976,7 @@ vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_tumu(
@@ -985,7 +985,7 @@ vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_tumu(
@@ -994,7 +994,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_tumu(
@@ -1003,7 +1003,7 @@ vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_tumu(
@@ -1012,7 +1012,7 @@ vint32m2_t test_vloxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_tumu(
@@ -1021,7 +1021,7 @@ vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_tumu(
@@ -1030,7 +1030,7 @@ vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_tumu(
@@ -1039,7 +1039,7 @@ vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_tumu(
@@ -1048,7 +1048,7 @@ vint64m4_t test_vloxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_tumu(
@@ -1057,7 +1057,7 @@ vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_tumu(
@@ -1066,7 +1066,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_tumu(
@@ -1075,7 +1075,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_tumu(
@@ -1084,7 +1084,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_tumu(
@@ -1093,7 +1093,7 @@ vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_tumu(
@@ -1102,7 +1102,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_tumu(
@@ -1111,7 +1111,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_tumu(
@@ -1120,7 +1120,7 @@ vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_tumu(
@@ -1129,7 +1129,7 @@ vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_tumu(
@@ -1138,7 +1138,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_tumu(
@@ -1147,7 +1147,7 @@ vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_tumu(
@@ -1156,7 +1156,7 @@ vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_tumu(
@@ -1165,7 +1165,7 @@ vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_tumu(
@@ -1174,7 +1174,7 @@ vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_tumu(
@@ -1183,7 +1183,7 @@ vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_tumu(
@@ -1192,7 +1192,7 @@ vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_mu(
@@ -1201,7 +1201,7 @@ vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_mu(
@@ -1210,7 +1210,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_mu(
@@ -1219,7 +1219,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_mu(
@@ -1228,7 +1228,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_mu(
@@ -1237,7 +1237,7 @@ vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_mu(
@@ -1246,7 +1246,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_mu(
@@ -1255,7 +1255,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_mu(
@@ -1264,7 +1264,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_mu(
@@ -1273,7 +1273,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_mu(
@@ -1282,7 +1282,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_mu(
@@ -1291,7 +1291,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_mu(
@@ -1300,7 +1300,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_mu(
@@ -1309,7 +1309,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_mu(
@@ -1318,7 +1318,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_mu(
@@ -1327,7 +1327,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_mu(
@@ -1336,7 +1336,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_mu(
@@ -1345,7 +1345,7 @@ vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_mu(
@@ -1354,7 +1354,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_mu(
@@ -1363,7 +1363,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_mu(
@@ -1372,7 +1372,7 @@ vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_mu(
@@ -1381,7 +1381,7 @@ vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_mu(
@@ -1390,7 +1390,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_mu(
@@ -1399,7 +1399,7 @@ vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_mu(
@@ -1408,7 +1408,7 @@ vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_mu(
@@ -1417,7 +1417,7 @@ vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_mu(
@@ -1426,7 +1426,7 @@ vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_mu(
@@ -1435,7 +1435,7 @@ vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_mu(
@@ -1444,7 +1444,7 @@ vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_mu(
@@ -1453,7 +1453,7 @@ vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_mu(
@@ -1462,7 +1462,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_mu(
@@ -1471,7 +1471,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_mu(
@@ -1480,7 +1480,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_mu(
@@ -1489,7 +1489,7 @@ vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_mu(
@@ -1498,7 +1498,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_mu(
@@ -1507,7 +1507,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_mu(
@@ -1516,7 +1516,7 @@ vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_mu(
@@ -1525,7 +1525,7 @@ vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_mu(
@@ -1534,7 +1534,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_mu(
@@ -1543,7 +1543,7 @@ vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_mu(
@@ -1552,7 +1552,7 @@ vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_mu(
@@ -1561,7 +1561,7 @@ vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_mu(
@@ -1570,7 +1570,7 @@ vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_mu(
@@ -1579,7 +1579,7 @@ vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_mu(
@@ -1588,6 +1588,6 @@ vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
}
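
For readers skimming these autogenerated tests, the pattern is uniform: every overloaded RVV intrinsic call gains the `__riscv_` prefix while its argument list and policy suffix (`_tu`, `_tum`, `_tumu`, `_mu`) are unchanged. A minimal caller-side sketch of the renamed overload, matching the `test_vloxei8_v_i8m1_tu` case below (the wrapper name `gather_tu` and the variables `old`, `src`, `idx` are hypothetical, introduced only for illustration):

#include <riscv_vector.h>

// Before this patch:          vloxei8_tu(old, src, idx, vl);
// After this patch:   __riscv_vloxei8_tu(old, src, idx, vl);
vint8m1_t gather_tu(vint8m1_t old, const int8_t *src, vuint8m1_t idx, size_t vl) {
  // Ordered indexed load, tail-undisturbed policy: active lanes load
  // from src at byte offsets idx[i]; tail lanes past vl keep the
  // corresponding values from `old`.
  return __riscv_vloxei8_tu(old, src, idx, vl);
}
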
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei8.c
index b2b7ad58e7ad..4d69857ae7bf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *bas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_tu(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_tu(
@@ -157,7 +157,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_tu(
@@ -166,7 +166,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_tu(
@@ -175,7 +175,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_tu(
@@ -184,7 +184,7 @@ vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_tu(
@@ -193,7 +193,7 @@ vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_tu(
@@ -202,7 +202,7 @@ vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_tu(
@@ -211,7 +211,7 @@ vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_tu(
@@ -220,7 +220,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_tu(
@@ -229,7 +229,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_tu(
@@ -238,7 +238,7 @@ vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_tu(
@@ -247,7 +247,7 @@ vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_tu(
@@ -256,7 +256,7 @@ vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_tu(
@@ -265,7 +265,7 @@ vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_tu(
@@ -274,7 +274,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_tu(
@@ -301,7 +301,7 @@ vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_tu(
@@ -310,7 +310,7 @@ vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_tu(
@@ -319,7 +319,7 @@ vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_tu(
@@ -328,7 +328,7 @@ vint64m2_t test_vloxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_tu(
@@ -337,7 +337,7 @@ vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_tu(
@@ -346,7 +346,7 @@ vint64m8_t test_vloxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_tu(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_tu(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_tu(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_tu(
@@ -382,7 +382,7 @@ vuint8m1_t test_vloxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_tu(
@@ -391,7 +391,7 @@ vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_tu(
@@ -400,7 +400,7 @@ vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_tu(
@@ -409,7 +409,7 @@ vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_tu(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_tu(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *ba
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_tu(
@@ -436,7 +436,7 @@ vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_tu(
@@ -445,7 +445,7 @@ vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_tu(
@@ -454,7 +454,7 @@ vuint16m4_t test_vloxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_tu(
@@ -463,7 +463,7 @@ vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_tu(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_tu(
@@ -481,7 +481,7 @@ vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_tu(
@@ -490,7 +490,7 @@ vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_tu(
@@ -499,7 +499,7 @@ vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_tu(
@@ -508,7 +508,7 @@ vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_tu(
@@ -517,7 +517,7 @@ vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_tu(
@@ -526,7 +526,7 @@ vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_tu(
@@ -535,7 +535,7 @@ vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_tum(
@@ -544,7 +544,7 @@ vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_tum(
@@ -553,7 +553,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_tum(
@@ -562,7 +562,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_tum(
@@ -571,7 +571,7 @@ vfloat16m1_t test_vloxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_tum(
@@ -580,7 +580,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_tum(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vloxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_tum(
@@ -598,7 +598,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_tum(
@@ -607,7 +607,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_tum(
@@ -616,7 +616,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_tum(
@@ -625,7 +625,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_tum(
@@ -634,7 +634,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_tum(
@@ -643,7 +643,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_tum(
@@ -652,7 +652,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_tum(
@@ -661,7 +661,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_tum(
@@ -670,7 +670,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_tum(
@@ -679,7 +679,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_tum(
@@ -688,7 +688,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_tum(
@@ -697,7 +697,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_tum(
@@ -706,7 +706,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_tum(
@@ -715,7 +715,7 @@ vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_tum(
@@ -724,7 +724,7 @@ vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_tum(
@@ -733,7 +733,7 @@ vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_tum(
@@ -742,7 +742,7 @@ vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_tum(
@@ -751,7 +751,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_tum(
@@ -760,7 +760,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_tum(
@@ -769,7 +769,7 @@ vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_tum(
@@ -778,7 +778,7 @@ vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_tum(
@@ -787,7 +787,7 @@ vint16m4_t test_vloxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_tum(
@@ -796,7 +796,7 @@ vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_tum(
@@ -805,7 +805,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_tum(
@@ -814,7 +814,7 @@ vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_tum(
@@ -823,7 +823,7 @@ vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_tum(
@@ -832,7 +832,7 @@ vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_tum(
@@ -841,7 +841,7 @@ vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_tum(
@@ -850,7 +850,7 @@ vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_tum(
@@ -859,7 +859,7 @@ vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_tum(
@@ -868,7 +868,7 @@ vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_tum(
@@ -877,7 +877,7 @@ vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_tum(
@@ -886,7 +886,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_tum(
@@ -895,7 +895,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_tum(
@@ -904,7 +904,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_tum(
@@ -913,7 +913,7 @@ vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_tum(
@@ -922,7 +922,7 @@ vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_tum(
@@ -931,7 +931,7 @@ vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_tum(
@@ -940,7 +940,7 @@ vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_tum(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_tum(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_tum(
@@ -967,7 +967,7 @@ vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_tum(
@@ -976,7 +976,7 @@ vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_tum(
@@ -985,7 +985,7 @@ vuint16m4_t test_vloxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_tum(
@@ -994,7 +994,7 @@ vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_tum(
@@ -1003,7 +1003,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_tum(
@@ -1012,7 +1012,7 @@ vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_tum(
@@ -1021,7 +1021,7 @@ vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_tum(
@@ -1030,7 +1030,7 @@ vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_tum(
@@ -1039,7 +1039,7 @@ vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_tum(
@@ -1048,7 +1048,7 @@ vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_tum(
@@ -1057,7 +1057,7 @@ vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_tum(
@@ -1066,7 +1066,7 @@ vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
}
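// The tests below switch from _tum to _tumu ("tail undisturbed, mask
// undisturbed"): both tail elements and masked-off elements retain the values
// supplied in `maskedoff`. A hedged sketch of one call, mirroring the operand
// types used in these tests (not itself part of the patch):
//   vuint8m1_t r = __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);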
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_tumu(
@@ -1075,7 +1075,7 @@ vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_tumu(
@@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_tumu(
@@ -1093,7 +1093,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_tumu(
@@ -1102,7 +1102,7 @@ vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_tumu(
@@ -1111,7 +1111,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_tumu(
@@ -1120,7 +1120,7 @@ vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_tumu(
@@ -1129,7 +1129,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_tumu(
@@ -1138,7 +1138,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_tumu(
@@ -1147,7 +1147,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_tumu(
@@ -1156,7 +1156,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_tumu(
@@ -1165,7 +1165,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_tumu(
@@ -1174,7 +1174,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_tumu(
@@ -1183,7 +1183,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_tumu(
@@ -1192,7 +1192,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_tumu(
@@ -1201,7 +1201,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_tumu(
@@ -1210,7 +1210,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_tumu(
@@ -1219,7 +1219,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_tumu(
@@ -1228,7 +1228,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_tumu(
@@ -1237,7 +1237,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_tumu(
@@ -1246,7 +1246,7 @@ vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_tumu(
@@ -1255,7 +1255,7 @@ vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_tumu(
@@ -1264,7 +1264,7 @@ vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_tumu(
@@ -1273,7 +1273,7 @@ vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_tumu(
@@ -1282,7 +1282,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_tumu(
@@ -1291,7 +1291,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_tumu(
@@ -1300,7 +1300,7 @@ vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_tumu(
@@ -1309,7 +1309,7 @@ vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_tumu(
@@ -1318,7 +1318,7 @@ vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_tumu(
@@ -1327,7 +1327,7 @@ vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_tumu(
@@ -1336,7 +1336,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_tumu(
@@ -1345,7 +1345,7 @@ vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_tumu(
@@ -1354,7 +1354,7 @@ vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_tumu(
@@ -1363,7 +1363,7 @@ vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_tumu(
@@ -1372,7 +1372,7 @@ vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_tumu(
@@ -1381,7 +1381,7 @@ vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_tumu(
@@ -1390,7 +1390,7 @@ vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_tumu(
@@ -1399,7 +1399,7 @@ vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_tumu(
@@ -1408,7 +1408,7 @@ vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_tumu(
@@ -1417,7 +1417,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_tumu(
@@ -1426,7 +1426,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_tumu(
@@ -1435,7 +1435,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_tumu(
@@ -1444,7 +1444,7 @@ vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_tumu(
@@ -1453,7 +1453,7 @@ vuint8m2_t test_vloxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_tumu(
@@ -1462,7 +1462,7 @@ vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_tumu(
@@ -1471,7 +1471,7 @@ vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_tumu(
@@ -1480,7 +1480,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_tumu(
@@ -1489,7 +1489,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_tumu(
@@ -1498,7 +1498,7 @@ vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_tumu(
@@ -1507,7 +1507,7 @@ vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_tumu(
@@ -1516,7 +1516,7 @@ vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_tumu(
@@ -1525,7 +1525,7 @@ vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_tumu(
@@ -1534,7 +1534,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_tumu(
@@ -1543,7 +1543,7 @@ vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_tumu(
@@ -1552,7 +1552,7 @@ vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_tumu(
@@ -1561,7 +1561,7 @@ vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_tumu(
@@ -1570,7 +1570,7 @@ vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_tumu(
@@ -1579,7 +1579,7 @@ vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_tumu(
@@ -1588,7 +1588,7 @@ vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_tumu(
@@ -1597,7 +1597,7 @@ vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
}
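// From here on the tests cover the _mu ("mask undisturbed", tail-agnostic)
// variants: masked-off elements keep the `maskedoff` values while tail
// elements are left agnostic. An illustrative call in the same shape as the
// tests below (assumed, not taken from the patch):
//   vint8mf8_t r = __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);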
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_mu(
@@ -1606,7 +1606,7 @@ vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vloxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_mu(
@@ -1615,7 +1615,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_mu(
@@ -1624,7 +1624,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_mu(
@@ -1633,7 +1633,7 @@ vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_mu(
@@ -1642,7 +1642,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_mu(
@@ -1651,7 +1651,7 @@ vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_mu(
@@ -1660,7 +1660,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_mu(
@@ -1669,7 +1669,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_mu(
@@ -1678,7 +1678,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vloxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_mu(
@@ -1687,7 +1687,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_mu(
@@ -1696,7 +1696,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_mu(
@@ -1705,7 +1705,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_mu(
@@ -1714,7 +1714,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_mu(
@@ -1723,7 +1723,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_mu(
@@ -1732,7 +1732,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_mu(
@@ -1741,7 +1741,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_mu(
@@ -1750,7 +1750,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_mu(
@@ -1759,7 +1759,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_mu(
@@ -1768,7 +1768,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_mu(
@@ -1777,7 +1777,7 @@ vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_mu(
@@ -1786,7 +1786,7 @@ vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_mu(
@@ -1795,7 +1795,7 @@ vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_mu(
@@ -1804,7 +1804,7 @@ vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_mu(
@@ -1813,7 +1813,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_mu(
@@ -1822,7 +1822,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_mu(
@@ -1831,7 +1831,7 @@ vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_mu(
@@ -1840,7 +1840,7 @@ vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_mu(
@@ -1849,7 +1849,7 @@ vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_mu(
@@ -1858,7 +1858,7 @@ vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_mu(
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vloxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_mu(
@@ -1876,7 +1876,7 @@ vint32m1_t test_vloxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_mu(
@@ -1885,7 +1885,7 @@ vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_mu(
@@ -1894,7 +1894,7 @@ vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_mu(
@@ -1903,7 +1903,7 @@ vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vloxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_mu(
@@ -1912,7 +1912,7 @@ vint64m1_t test_vloxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_mu(
@@ -1921,7 +1921,7 @@ vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_mu(
@@ -1930,7 +1930,7 @@ vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_mu(
@@ -1939,7 +1939,7 @@ vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_mu(
@@ -1948,7 +1948,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_mu(
@@ -1957,7 +1957,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vloxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_mu(
@@ -1966,7 +1966,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_mu(
@@ -1975,7 +1975,7 @@ vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_mu(
@@ -1984,7 +1984,7 @@ vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_mu(
@@ -1993,7 +1993,7 @@ vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_mu(
@@ -2002,7 +2002,7 @@ vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_mu(
@@ -2011,7 +2011,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_mu(
@@ -2020,7 +2020,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_mu(
@@ -2029,7 +2029,7 @@ vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_mu(
@@ -2038,7 +2038,7 @@ vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_mu(
@@ -2047,7 +2047,7 @@ vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_mu(
@@ -2056,7 +2056,7 @@ vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_mu(
@@ -2065,7 +2065,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vloxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_mu(
@@ -2074,7 +2074,7 @@ vuint32m1_t test_vloxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_mu(
@@ -2083,7 +2083,7 @@ vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vloxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_mu(
@@ -2092,7 +2092,7 @@ vuint32m4_t test_vloxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_mu(
@@ -2101,7 +2101,7 @@ vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_mu(
@@ -2110,7 +2110,7 @@ vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_mu(
@@ -2119,7 +2119,7 @@ vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_mu(
@@ -2128,6 +2128,6 @@ vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vloxei8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c
index 5a580b88ccf5..12048f399a9f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vloxseg2ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vloxseg2ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vloxseg2ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_tu(
@@ -69,7 +69,7 @@ void test_vloxseg2ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_tu(
@@ -82,7 +82,7 @@ void test_vloxseg2ei16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_tu(
@@ -95,7 +95,7 @@ void test_vloxseg2ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_tu(
@@ -108,7 +108,7 @@ void test_vloxseg2ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_tu(
@@ -121,7 +121,7 @@ void test_vloxseg2ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_tu(
@@ -134,7 +134,7 @@ void test_vloxseg2ei16_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_tu(
@@ -147,7 +147,7 @@ void test_vloxseg2ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_tu(
@@ -160,7 +160,7 @@ void test_vloxseg2ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_tu(
@@ -173,7 +173,7 @@ void test_vloxseg2ei16_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_tu(
@@ -186,7 +186,7 @@ void test_vloxseg2ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_tu(
@@ -199,7 +199,7 @@ void test_vloxseg2ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_tu(
@@ -212,7 +212,7 @@ void test_vloxseg2ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_tu(
@@ -225,7 +225,7 @@ void test_vloxseg2ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_tu(
@@ -238,7 +238,7 @@ void test_vloxseg2ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_tu(
@@ -251,7 +251,7 @@ void test_vloxseg2ei16_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_tu(
@@ -264,7 +264,7 @@ void test_vloxseg2ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vloxseg2ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_tu(
@@ -290,7 +290,7 @@ void test_vloxseg2ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_tu(
@@ -303,7 +303,7 @@ void test_vloxseg2ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_tu(
@@ -316,7 +316,7 @@ void test_vloxseg2ei16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_tu(
@@ -329,7 +329,7 @@ void test_vloxseg2ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_tu(
@@ -342,7 +342,7 @@ void test_vloxseg2ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_tu(
@@ -355,7 +355,7 @@ void test_vloxseg2ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_tu(
@@ -368,7 +368,7 @@ void test_vloxseg2ei16_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_tu(
@@ -381,7 +381,7 @@ void test_vloxseg2ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_tu(
@@ -394,7 +394,7 @@ void test_vloxseg2ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_tu(
@@ -407,7 +407,7 @@ void test_vloxseg2ei16_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_tu(
@@ -420,7 +420,7 @@ void test_vloxseg2ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_tu(
@@ -433,7 +433,7 @@ void test_vloxseg2ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_tu(
@@ -446,7 +446,7 @@ void test_vloxseg2ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_tu(
@@ -459,7 +459,7 @@ void test_vloxseg2ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_tu(
@@ -472,7 +472,7 @@ void test_vloxseg2ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_tu(
@@ -485,7 +485,7 @@ void test_vloxseg2ei16_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_tu(
@@ -498,7 +498,7 @@ void test_vloxseg2ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_tu(
@@ -511,7 +511,7 @@ void test_vloxseg2ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_tu(
@@ -524,7 +524,7 @@ void test_vloxseg2ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_tu(
@@ -537,7 +537,7 @@ void test_vloxseg2ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_tu(
@@ -550,7 +550,7 @@ void test_vloxseg2ei16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_tu(
@@ -563,7 +563,7 @@ void test_vloxseg2ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_tu(
@@ -576,7 +576,7 @@ void test_vloxseg2ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_tu(
@@ -589,7 +589,7 @@ void test_vloxseg2ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vloxseg2ei16_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_tu(
@@ -615,7 +615,7 @@ void test_vloxseg2ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_tu(
@@ -628,7 +628,7 @@ void test_vloxseg2ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_tum(
@@ -641,7 +641,7 @@ void test_vloxseg2ei16_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_tum(
@@ -654,7 +654,7 @@ void test_vloxseg2ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_tum(
@@ -667,7 +667,7 @@ void test_vloxseg2ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_tum(
@@ -680,7 +680,7 @@ void test_vloxseg2ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_tum(
@@ -693,7 +693,7 @@ void test_vloxseg2ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_tum(
@@ -706,7 +706,7 @@ void test_vloxseg2ei16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_tum(
@@ -719,7 +719,7 @@ void test_vloxseg2ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_tum(
@@ -732,7 +732,7 @@ void test_vloxseg2ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_tum(
@@ -745,7 +745,7 @@ void test_vloxseg2ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_tum(
@@ -758,7 +758,7 @@ void test_vloxseg2ei16_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_tum(
@@ -771,7 +771,7 @@ void test_vloxseg2ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_tum(
@@ -784,7 +784,7 @@ void test_vloxseg2ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_tum(
@@ -797,7 +797,7 @@ void test_vloxseg2ei16_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_tum(
@@ -810,7 +810,7 @@ void test_vloxseg2ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_tum(
@@ -823,7 +823,7 @@ void test_vloxseg2ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_tum(
@@ -836,7 +836,7 @@ void test_vloxseg2ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_tum(
@@ -849,7 +849,7 @@ void test_vloxseg2ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_tum(
@@ -862,7 +862,7 @@ void test_vloxseg2ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_tum(
@@ -875,7 +875,7 @@ void test_vloxseg2ei16_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_tum(
@@ -888,7 +888,7 @@ void test_vloxseg2ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vloxseg2ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_tum(
@@ -914,7 +914,7 @@ void test_vloxseg2ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_tum(
@@ -927,7 +927,7 @@ void test_vloxseg2ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_tum(
@@ -940,7 +940,7 @@ void test_vloxseg2ei16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_tum(
@@ -953,7 +953,7 @@ void test_vloxseg2ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_tum(
@@ -966,7 +966,7 @@ void test_vloxseg2ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_tum(
@@ -979,7 +979,7 @@ void test_vloxseg2ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_tum(
@@ -992,7 +992,7 @@ void test_vloxseg2ei16_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_tum(
@@ -1005,7 +1005,7 @@ void test_vloxseg2ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_tum(
@@ -1018,7 +1018,7 @@ void test_vloxseg2ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_tum(
@@ -1031,7 +1031,7 @@ void test_vloxseg2ei16_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_tum(
@@ -1044,7 +1044,7 @@ void test_vloxseg2ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_tum(
@@ -1057,7 +1057,7 @@ void test_vloxseg2ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_tum(
@@ -1070,7 +1070,7 @@ void test_vloxseg2ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_tum(
@@ -1083,7 +1083,7 @@ void test_vloxseg2ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_tum(
@@ -1096,7 +1096,7 @@ void test_vloxseg2ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_tum(
@@ -1109,7 +1109,7 @@ void test_vloxseg2ei16_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_tum(
@@ -1122,7 +1122,7 @@ void test_vloxseg2ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_tum(
@@ -1135,7 +1135,7 @@ void test_vloxseg2ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_tum(
@@ -1148,7 +1148,7 @@ void test_vloxseg2ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_tum(
@@ -1161,7 +1161,7 @@ void test_vloxseg2ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_tum(
@@ -1174,7 +1174,7 @@ void test_vloxseg2ei16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_tum(
@@ -1187,7 +1187,7 @@ void test_vloxseg2ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_tum(
@@ -1200,7 +1200,7 @@ void test_vloxseg2ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_tum(
@@ -1213,7 +1213,7 @@ void test_vloxseg2ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_tum(
@@ -1226,7 +1226,7 @@ void test_vloxseg2ei16_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_tum(
@@ -1239,7 +1239,7 @@ void test_vloxseg2ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_tum(
@@ -1252,7 +1252,7 @@ void test_vloxseg2ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_tumu(
@@ -1265,7 +1265,7 @@ void test_vloxseg2ei16_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_tumu(
@@ -1278,7 +1278,7 @@ void test_vloxseg2ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_tumu(
@@ -1291,7 +1291,7 @@ void test_vloxseg2ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_tumu(
@@ -1304,7 +1304,7 @@ void test_vloxseg2ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_tumu(
@@ -1317,7 +1317,7 @@ void test_vloxseg2ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_tumu(
@@ -1330,7 +1330,7 @@ void test_vloxseg2ei16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_tumu(
@@ -1343,7 +1343,7 @@ void test_vloxseg2ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_tumu(
@@ -1356,7 +1356,7 @@ void test_vloxseg2ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg2ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_tumu(
@@ -1382,7 +1382,7 @@ void test_vloxseg2ei16_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_tumu(
@@ -1395,7 +1395,7 @@ void test_vloxseg2ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_tumu(
@@ -1408,7 +1408,7 @@ void test_vloxseg2ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_tumu(
@@ -1421,7 +1421,7 @@ void test_vloxseg2ei16_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_tumu(
@@ -1434,7 +1434,7 @@ void test_vloxseg2ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_tumu(
@@ -1447,7 +1447,7 @@ void test_vloxseg2ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_tumu(
@@ -1460,7 +1460,7 @@ void test_vloxseg2ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_tumu(
@@ -1473,7 +1473,7 @@ void test_vloxseg2ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_tumu(
@@ -1486,7 +1486,7 @@ void test_vloxseg2ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_tumu(
@@ -1499,7 +1499,7 @@ void test_vloxseg2ei16_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_tumu(
@@ -1512,7 +1512,7 @@ void test_vloxseg2ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_tumu(
@@ -1525,7 +1525,7 @@ void test_vloxseg2ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_tumu(
@@ -1538,7 +1538,7 @@ void test_vloxseg2ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_tumu(
@@ -1551,7 +1551,7 @@ void test_vloxseg2ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vloxseg2ei16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_tumu(
@@ -1577,7 +1577,7 @@ void test_vloxseg2ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_tumu(
@@ -1590,7 +1590,7 @@ void test_vloxseg2ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_tumu(
@@ -1603,7 +1603,7 @@ void test_vloxseg2ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_tumu(
@@ -1616,7 +1616,7 @@ void test_vloxseg2ei16_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_tumu(
@@ -1629,7 +1629,7 @@ void test_vloxseg2ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_tumu(
@@ -1642,7 +1642,7 @@ void test_vloxseg2ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_tumu(
@@ -1655,7 +1655,7 @@ void test_vloxseg2ei16_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_tumu(
@@ -1668,7 +1668,7 @@ void test_vloxseg2ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_tumu(
@@ -1681,7 +1681,7 @@ void test_vloxseg2ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_tumu(
@@ -1694,7 +1694,7 @@ void test_vloxseg2ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_tumu(
@@ -1707,7 +1707,7 @@ void test_vloxseg2ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_tumu(
@@ -1720,7 +1720,7 @@ void test_vloxseg2ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_tumu(
@@ -1733,7 +1733,7 @@ void test_vloxseg2ei16_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_tumu(
@@ -1746,7 +1746,7 @@ void test_vloxseg2ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_tumu(
@@ -1759,7 +1759,7 @@ void test_vloxseg2ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_tumu(
@@ -1772,7 +1772,7 @@ void test_vloxseg2ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_tumu(
@@ -1785,7 +1785,7 @@ void test_vloxseg2ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_tumu(
@@ -1798,7 +1798,7 @@ void test_vloxseg2ei16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_tumu(
@@ -1811,7 +1811,7 @@ void test_vloxseg2ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_tumu(
@@ -1824,7 +1824,7 @@ void test_vloxseg2ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_tumu(
@@ -1837,7 +1837,7 @@ void test_vloxseg2ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_tumu(
@@ -1850,7 +1850,7 @@ void test_vloxseg2ei16_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_tumu(
@@ -1863,7 +1863,7 @@ void test_vloxseg2ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_tumu(
@@ -1876,7 +1876,7 @@ void test_vloxseg2ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_mu(
@@ -1889,7 +1889,7 @@ void test_vloxseg2ei16_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_mu(
@@ -1902,7 +1902,7 @@ void test_vloxseg2ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vloxseg2ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_mu(
@@ -1928,7 +1928,7 @@ void test_vloxseg2ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_mu(
@@ -1941,7 +1941,7 @@ void test_vloxseg2ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vloxseg2ei16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_mu(
@@ -1967,7 +1967,7 @@ void test_vloxseg2ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_mu(
@@ -1980,7 +1980,7 @@ void test_vloxseg2ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_mu(
@@ -1993,7 +1993,7 @@ void test_vloxseg2ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_mu(
@@ -2006,7 +2006,7 @@ void test_vloxseg2ei16_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_mu(
@@ -2019,7 +2019,7 @@ void test_vloxseg2ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_mu(
@@ -2032,7 +2032,7 @@ void test_vloxseg2ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_mu(
@@ -2045,7 +2045,7 @@ void test_vloxseg2ei16_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_mu(
@@ -2058,7 +2058,7 @@ void test_vloxseg2ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_mu(
@@ -2071,7 +2071,7 @@ void test_vloxseg2ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_mu(
@@ -2084,7 +2084,7 @@ void test_vloxseg2ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_mu(
@@ -2097,7 +2097,7 @@ void test_vloxseg2ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_mu(
@@ -2110,7 +2110,7 @@ void test_vloxseg2ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_mu(
@@ -2123,7 +2123,7 @@ void test_vloxseg2ei16_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_mu(
@@ -2136,7 +2136,7 @@ void test_vloxseg2ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_mu(
@@ -2149,7 +2149,7 @@ void test_vloxseg2ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_mu(
@@ -2162,7 +2162,7 @@ void test_vloxseg2ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_mu(
@@ -2175,7 +2175,7 @@ void test_vloxseg2ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_mu(
@@ -2188,7 +2188,7 @@ void test_vloxseg2ei16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_mu(
@@ -2201,7 +2201,7 @@ void test_vloxseg2ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_mu(
@@ -2214,7 +2214,7 @@ void test_vloxseg2ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_mu(
@@ -2227,7 +2227,7 @@ void test_vloxseg2ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_mu(
@@ -2240,7 +2240,7 @@ void test_vloxseg2ei16_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_mu(
@@ -2253,7 +2253,7 @@ void test_vloxseg2ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_mu(
@@ -2266,7 +2266,7 @@ void test_vloxseg2ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_mu(
@@ -2279,7 +2279,7 @@ void test_vloxseg2ei16_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_mu(
@@ -2292,7 +2292,7 @@ void test_vloxseg2ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_mu(
@@ -2305,7 +2305,7 @@ void test_vloxseg2ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_mu(
@@ -2318,7 +2318,7 @@ void test_vloxseg2ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_mu(
@@ -2331,7 +2331,7 @@ void test_vloxseg2ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_mu(
@@ -2344,7 +2344,7 @@ void test_vloxseg2ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_mu(
@@ -2357,7 +2357,7 @@ void test_vloxseg2ei16_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_mu(
@@ -2370,7 +2370,7 @@ void test_vloxseg2ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_mu(
@@ -2383,7 +2383,7 @@ void test_vloxseg2ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_mu(
@@ -2396,7 +2396,7 @@ void test_vloxseg2ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_mu(
@@ -2409,7 +2409,7 @@ void test_vloxseg2ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_mu(
@@ -2422,7 +2422,7 @@ void test_vloxseg2ei16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_mu(
@@ -2435,7 +2435,7 @@ void test_vloxseg2ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_mu(
@@ -2448,7 +2448,7 @@ void test_vloxseg2ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_mu(
@@ -2461,7 +2461,7 @@ void test_vloxseg2ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_mu(
@@ -2474,7 +2474,7 @@ void test_vloxseg2ei16_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_mu(
@@ -2487,7 +2487,7 @@ void test_vloxseg2ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_mu(
@@ -2500,6 +2500,6 @@ void test_vloxseg2ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei16_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
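Every hunk above makes the same mechanical substitution: the overloaded segment-load call gains the __riscv_ prefix while its argument list is untouched. As an illustration only (not part of the patch), a minimal caller after this series might look as follows, assuming a Clang built with this change and the V extension enabled; the function name gather_pair is hypothetical, everything else is taken verbatim from the test signatures above:

    #include <riscv_vector.h>

    // Indexed segment load of two i64m2 fields under the mask-undisturbed
    // (_mu) policy. Results are written through v0/v1, exactly as in the
    // autogenerated tests.
    void gather_pair(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
                     vint64m2_t maskedoff0, vint64m2_t maskedoff1,
                     const int64_t *base, vuint16mf2_t bindex, size_t vl) {
      __riscv_vloxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1,
                              base, bindex, vl);  // was: vloxseg2ei16_mu(...)
    }

All policy variants (_tu, _tum, _tumu, _mu) are renamed the same way, so migrating existing sources amounts to adding the prefix at each overloaded call site.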
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c
index daebfee1b2b5..26b217d039fb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vloxseg2ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vloxseg2ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vloxseg2ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_tu(
@@ -69,7 +69,7 @@ void test_vloxseg2ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_tu(
@@ -82,7 +82,7 @@ void test_vloxseg2ei32_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_tu(
@@ -95,7 +95,7 @@ void test_vloxseg2ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_tu(
@@ -108,7 +108,7 @@ void test_vloxseg2ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_tu(
@@ -121,7 +121,7 @@ void test_vloxseg2ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_tu(
@@ -134,7 +134,7 @@ void test_vloxseg2ei32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_tu(
@@ -147,7 +147,7 @@ void test_vloxseg2ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_tu(
@@ -160,7 +160,7 @@ void test_vloxseg2ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_tu(
@@ -173,7 +173,7 @@ void test_vloxseg2ei32_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_tu(
@@ -186,7 +186,7 @@ void test_vloxseg2ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_tu(
@@ -199,7 +199,7 @@ void test_vloxseg2ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_tu(
@@ -212,7 +212,7 @@ void test_vloxseg2ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_tu(
@@ -225,7 +225,7 @@ void test_vloxseg2ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_tu(
@@ -238,7 +238,7 @@ void test_vloxseg2ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_tu(
@@ -251,7 +251,7 @@ void test_vloxseg2ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_tu(
@@ -264,7 +264,7 @@ void test_vloxseg2ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_tu(
@@ -277,7 +277,7 @@ void test_vloxseg2ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_tu(
@@ -290,7 +290,7 @@ void test_vloxseg2ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tu(
@@ -303,7 +303,7 @@ void test_vloxseg2ei32_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_tu(
@@ -316,7 +316,7 @@ void test_vloxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_tu(
@@ -329,7 +329,7 @@ void test_vloxseg2ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_tu(
@@ -342,7 +342,7 @@ void test_vloxseg2ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_tu(
@@ -355,7 +355,7 @@ void test_vloxseg2ei32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_tu(
@@ -368,7 +368,7 @@ void test_vloxseg2ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_tu(
@@ -381,7 +381,7 @@ void test_vloxseg2ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_tu(
@@ -394,7 +394,7 @@ void test_vloxseg2ei32_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_tu(
@@ -407,7 +407,7 @@ void test_vloxseg2ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_tu(
@@ -420,7 +420,7 @@ void test_vloxseg2ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_tu(
@@ -433,7 +433,7 @@ void test_vloxseg2ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_tu(
@@ -446,7 +446,7 @@ void test_vloxseg2ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_tu(
@@ -459,7 +459,7 @@ void test_vloxseg2ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_tu(
@@ -472,7 +472,7 @@ void test_vloxseg2ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_tu(
@@ -485,7 +485,7 @@ void test_vloxseg2ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_tu(
@@ -498,7 +498,7 @@ void test_vloxseg2ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_tu(
@@ -511,7 +511,7 @@ void test_vloxseg2ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_tu(
@@ -524,7 +524,7 @@ void test_vloxseg2ei32_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_tu(
@@ -537,7 +537,7 @@ void test_vloxseg2ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_tu(
@@ -550,7 +550,7 @@ void test_vloxseg2ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_tu(
@@ -563,7 +563,7 @@ void test_vloxseg2ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_tu(
@@ -576,7 +576,7 @@ void test_vloxseg2ei32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_tu(
@@ -589,7 +589,7 @@ void test_vloxseg2ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_tu(
@@ -602,7 +602,7 @@ void test_vloxseg2ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_tum(
@@ -615,7 +615,7 @@ void test_vloxseg2ei32_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_tum(
@@ -628,7 +628,7 @@ void test_vloxseg2ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_tum(
@@ -641,7 +641,7 @@ void test_vloxseg2ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_tum(
@@ -654,7 +654,7 @@ void test_vloxseg2ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_tum(
@@ -667,7 +667,7 @@ void test_vloxseg2ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_tum(
@@ -680,7 +680,7 @@ void test_vloxseg2ei32_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_tum(
@@ -693,7 +693,7 @@ void test_vloxseg2ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_tum(
@@ -706,7 +706,7 @@ void test_vloxseg2ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_tum(
@@ -719,7 +719,7 @@ void test_vloxseg2ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_tum(
@@ -732,7 +732,7 @@ void test_vloxseg2ei32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_tum(
@@ -745,7 +745,7 @@ void test_vloxseg2ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_tum(
@@ -758,7 +758,7 @@ void test_vloxseg2ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_tum(
@@ -771,7 +771,7 @@ void test_vloxseg2ei32_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_tum(
@@ -784,7 +784,7 @@ void test_vloxseg2ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_tum(
@@ -797,7 +797,7 @@ void test_vloxseg2ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_tum(
@@ -810,7 +810,7 @@ void test_vloxseg2ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_tum(
@@ -823,7 +823,7 @@ void test_vloxseg2ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_tum(
@@ -836,7 +836,7 @@ void test_vloxseg2ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_tum(
@@ -849,7 +849,7 @@ void test_vloxseg2ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_tum(
@@ -862,7 +862,7 @@ void test_vloxseg2ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_tum(
@@ -875,7 +875,7 @@ void test_vloxseg2ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_tum(
@@ -888,7 +888,7 @@ void test_vloxseg2ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tum(
@@ -901,7 +901,7 @@ void test_vloxseg2ei32_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_tum(
@@ -914,7 +914,7 @@ void test_vloxseg2ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_tum(
@@ -927,7 +927,7 @@ void test_vloxseg2ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_tum(
@@ -940,7 +940,7 @@ void test_vloxseg2ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_tum(
@@ -953,7 +953,7 @@ void test_vloxseg2ei32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_tum(
@@ -966,7 +966,7 @@ void test_vloxseg2ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_tum(
@@ -979,7 +979,7 @@ void test_vloxseg2ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_tum(
@@ -992,7 +992,7 @@ void test_vloxseg2ei32_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_tum(
@@ -1005,7 +1005,7 @@ void test_vloxseg2ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_tum(
@@ -1018,7 +1018,7 @@ void test_vloxseg2ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_tum(
@@ -1031,7 +1031,7 @@ void test_vloxseg2ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_tum(
@@ -1044,7 +1044,7 @@ void test_vloxseg2ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_tum(
@@ -1057,7 +1057,7 @@ void test_vloxseg2ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_tum(
@@ -1070,7 +1070,7 @@ void test_vloxseg2ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_tum(
@@ -1083,7 +1083,7 @@ void test_vloxseg2ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_tum(
@@ -1096,7 +1096,7 @@ void test_vloxseg2ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_tum(
@@ -1109,7 +1109,7 @@ void test_vloxseg2ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_tum(
@@ -1122,7 +1122,7 @@ void test_vloxseg2ei32_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_tum(
@@ -1135,7 +1135,7 @@ void test_vloxseg2ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_tum(
@@ -1148,7 +1148,7 @@ void test_vloxseg2ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_tum(
@@ -1161,7 +1161,7 @@ void test_vloxseg2ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_tum(
@@ -1174,7 +1174,7 @@ void test_vloxseg2ei32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_tum(
@@ -1187,7 +1187,7 @@ void test_vloxseg2ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_tum(
@@ -1200,7 +1200,7 @@ void test_vloxseg2ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_tumu(
@@ -1213,7 +1213,7 @@ void test_vloxseg2ei32_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_tumu(
@@ -1226,7 +1226,7 @@ void test_vloxseg2ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vloxseg2ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_tumu(
@@ -1252,7 +1252,7 @@ void test_vloxseg2ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_tumu(
@@ -1265,7 +1265,7 @@ void test_vloxseg2ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_tumu(
@@ -1278,7 +1278,7 @@ void test_vloxseg2ei32_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_tumu(
@@ -1291,7 +1291,7 @@ void test_vloxseg2ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_tumu(
@@ -1304,7 +1304,7 @@ void test_vloxseg2ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_tumu(
@@ -1317,7 +1317,7 @@ void test_vloxseg2ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_tumu(
@@ -1330,7 +1330,7 @@ void test_vloxseg2ei32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_tumu(
@@ -1343,7 +1343,7 @@ void test_vloxseg2ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_tumu(
@@ -1356,7 +1356,7 @@ void test_vloxseg2ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg2ei32_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_tumu(
@@ -1382,7 +1382,7 @@ void test_vloxseg2ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_tumu(
@@ -1395,7 +1395,7 @@ void test_vloxseg2ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_tumu(
@@ -1408,7 +1408,7 @@ void test_vloxseg2ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_tumu(
@@ -1421,7 +1421,7 @@ void test_vloxseg2ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_tumu(
@@ -1434,7 +1434,7 @@ void test_vloxseg2ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_tumu(
@@ -1447,7 +1447,7 @@ void test_vloxseg2ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_tumu(
@@ -1460,7 +1460,7 @@ void test_vloxseg2ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_tumu(
@@ -1473,7 +1473,7 @@ void test_vloxseg2ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_tumu(
@@ -1486,7 +1486,7 @@ void test_vloxseg2ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tumu(
@@ -1499,7 +1499,7 @@ void test_vloxseg2ei32_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_tumu(
@@ -1512,7 +1512,7 @@ void test_vloxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_tumu(
@@ -1525,7 +1525,7 @@ void test_vloxseg2ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_tumu(
@@ -1538,7 +1538,7 @@ void test_vloxseg2ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vloxseg2ei32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_tumu(
@@ -1564,7 +1564,7 @@ void test_vloxseg2ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_tumu(
@@ -1577,7 +1577,7 @@ void test_vloxseg2ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_tumu(
@@ -1590,7 +1590,7 @@ void test_vloxseg2ei32_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_tumu(
@@ -1603,7 +1603,7 @@ void test_vloxseg2ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_tumu(
@@ -1616,7 +1616,7 @@ void test_vloxseg2ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vloxseg2ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_tumu(
@@ -1642,7 +1642,7 @@ void test_vloxseg2ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_tumu(
@@ -1655,7 +1655,7 @@ void test_vloxseg2ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_tumu(
@@ -1668,7 +1668,7 @@ void test_vloxseg2ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_tumu(
@@ -1681,7 +1681,7 @@ void test_vloxseg2ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_tumu(
@@ -1694,7 +1694,7 @@ void test_vloxseg2ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_tumu(
@@ -1707,7 +1707,7 @@ void test_vloxseg2ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_tumu(
@@ -1720,7 +1720,7 @@ void test_vloxseg2ei32_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_tumu(
@@ -1733,7 +1733,7 @@ void test_vloxseg2ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_tumu(
@@ -1746,7 +1746,7 @@ void test_vloxseg2ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_tumu(
@@ -1759,7 +1759,7 @@ void test_vloxseg2ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_tumu(
@@ -1772,7 +1772,7 @@ void test_vloxseg2ei32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_tumu(
@@ -1785,7 +1785,7 @@ void test_vloxseg2ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_tumu(
@@ -1798,7 +1798,7 @@ void test_vloxseg2ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_mu(
@@ -1811,7 +1811,7 @@ void test_vloxseg2ei32_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_mu(
@@ -1824,7 +1824,7 @@ void test_vloxseg2ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_mu(
@@ -1837,7 +1837,7 @@ void test_vloxseg2ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_mu(
@@ -1850,7 +1850,7 @@ void test_vloxseg2ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_mu(
@@ -1863,7 +1863,7 @@ void test_vloxseg2ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_mu(
@@ -1876,7 +1876,7 @@ void test_vloxseg2ei32_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_mu(
@@ -1889,7 +1889,7 @@ void test_vloxseg2ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_mu(
@@ -1902,7 +1902,7 @@ void test_vloxseg2ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_mu(
@@ -1915,7 +1915,7 @@ void test_vloxseg2ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_mu(
@@ -1928,7 +1928,7 @@ void test_vloxseg2ei32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_mu(
@@ -1941,7 +1941,7 @@ void test_vloxseg2ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_mu(
@@ -1954,7 +1954,7 @@ void test_vloxseg2ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_mu(
@@ -1967,7 +1967,7 @@ void test_vloxseg2ei32_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_mu(
@@ -1980,7 +1980,7 @@ void test_vloxseg2ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_mu(
@@ -1993,7 +1993,7 @@ void test_vloxseg2ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_mu(
@@ -2006,7 +2006,7 @@ void test_vloxseg2ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_mu(
@@ -2019,7 +2019,7 @@ void test_vloxseg2ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_mu(
@@ -2032,7 +2032,7 @@ void test_vloxseg2ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_mu(
@@ -2045,7 +2045,7 @@ void test_vloxseg2ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_mu(
@@ -2058,7 +2058,7 @@ void test_vloxseg2ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_mu(
@@ -2071,7 +2071,7 @@ void test_vloxseg2ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_mu(
@@ -2084,7 +2084,7 @@ void test_vloxseg2ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_mu(
@@ -2097,7 +2097,7 @@ void test_vloxseg2ei32_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_mu(
@@ -2110,7 +2110,7 @@ void test_vloxseg2ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_mu(
@@ -2123,7 +2123,7 @@ void test_vloxseg2ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_mu(
@@ -2136,7 +2136,7 @@ void test_vloxseg2ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_mu(
@@ -2149,7 +2149,7 @@ void test_vloxseg2ei32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_mu(
@@ -2162,7 +2162,7 @@ void test_vloxseg2ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_mu(
@@ -2175,7 +2175,7 @@ void test_vloxseg2ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_mu(
@@ -2188,7 +2188,7 @@ void test_vloxseg2ei32_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_mu(
@@ -2201,7 +2201,7 @@ void test_vloxseg2ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vloxseg2ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_mu(
@@ -2227,7 +2227,7 @@ void test_vloxseg2ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_mu(
@@ -2240,7 +2240,7 @@ void test_vloxseg2ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_mu(
@@ -2253,7 +2253,7 @@ void test_vloxseg2ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_mu(
@@ -2266,7 +2266,7 @@ void test_vloxseg2ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vloxseg2ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_mu(
@@ -2292,7 +2292,7 @@ void test_vloxseg2ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_mu(
@@ -2305,7 +2305,7 @@ void test_vloxseg2ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_mu(
@@ -2318,7 +2318,7 @@ void test_vloxseg2ei32_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_mu(
@@ -2331,7 +2331,7 @@ void test_vloxseg2ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_mu(
@@ -2344,7 +2344,7 @@ void test_vloxseg2ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_mu(
@@ -2357,7 +2357,7 @@ void test_vloxseg2ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_mu(
@@ -2370,7 +2370,7 @@ void test_vloxseg2ei32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_mu(
@@ -2383,7 +2383,7 @@ void test_vloxseg2ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_mu(
@@ -2396,6 +2396,6 @@ void test_vloxseg2ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei32_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c
index aeddde317485..9dc6affd8bdc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vloxseg2ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vloxseg2ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vloxseg2ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_tu(
@@ -69,7 +69,7 @@ void test_vloxseg2ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_tu(
@@ -82,7 +82,7 @@ void test_vloxseg2ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_tu(
@@ -95,7 +95,7 @@ void test_vloxseg2ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_tu(
@@ -108,7 +108,7 @@ void test_vloxseg2ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_tu(
@@ -121,7 +121,7 @@ void test_vloxseg2ei64_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_tu(
@@ -134,7 +134,7 @@ void test_vloxseg2ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_tu(
@@ -147,7 +147,7 @@ void test_vloxseg2ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_tu(
@@ -160,7 +160,7 @@ void test_vloxseg2ei64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_tu(
@@ -173,7 +173,7 @@ void test_vloxseg2ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_tu(
@@ -186,7 +186,7 @@ void test_vloxseg2ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vloxseg2ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_tu(
@@ -212,7 +212,7 @@ void test_vloxseg2ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_tu(
@@ -225,7 +225,7 @@ void test_vloxseg2ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_tu(
@@ -238,7 +238,7 @@ void test_vloxseg2ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_tu(
@@ -251,7 +251,7 @@ void test_vloxseg2ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_tu(
@@ -264,7 +264,7 @@ void test_vloxseg2ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_tu(
@@ -277,7 +277,7 @@ void test_vloxseg2ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_tu(
@@ -290,7 +290,7 @@ void test_vloxseg2ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_tu(
@@ -303,7 +303,7 @@ void test_vloxseg2ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_tu(
@@ -316,7 +316,7 @@ void test_vloxseg2ei64_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_tu(
@@ -329,7 +329,7 @@ void test_vloxseg2ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_tu(
@@ -342,7 +342,7 @@ void test_vloxseg2ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_tu(
@@ -355,7 +355,7 @@ void test_vloxseg2ei64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_tu(
@@ -368,7 +368,7 @@ void test_vloxseg2ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_tu(
@@ -381,7 +381,7 @@ void test_vloxseg2ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_tu(
@@ -394,7 +394,7 @@ void test_vloxseg2ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_tu(
@@ -407,7 +407,7 @@ void test_vloxseg2ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_tu(
@@ -420,7 +420,7 @@ void test_vloxseg2ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_tu(
@@ -433,7 +433,7 @@ void test_vloxseg2ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_tu(
@@ -446,7 +446,7 @@ void test_vloxseg2ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_tu(
@@ -459,7 +459,7 @@ void test_vloxseg2ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_tu(
@@ -472,7 +472,7 @@ void test_vloxseg2ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_tu(
@@ -485,7 +485,7 @@ void test_vloxseg2ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_tu(
@@ -498,7 +498,7 @@ void test_vloxseg2ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_tu(
@@ -511,7 +511,7 @@ void test_vloxseg2ei64_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_tu(
@@ -524,7 +524,7 @@ void test_vloxseg2ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_tu(
@@ -537,7 +537,7 @@ void test_vloxseg2ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_tum(
@@ -550,7 +550,7 @@ void test_vloxseg2ei64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_tum(
@@ -563,7 +563,7 @@ void test_vloxseg2ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_tum(
@@ -576,7 +576,7 @@ void test_vloxseg2ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_tum(
@@ -589,7 +589,7 @@ void test_vloxseg2ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_tum(
@@ -602,7 +602,7 @@ void test_vloxseg2ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_tum(
@@ -615,7 +615,7 @@ void test_vloxseg2ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_tum(
@@ -628,7 +628,7 @@ void test_vloxseg2ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_tum(
@@ -641,7 +641,7 @@ void test_vloxseg2ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_tum(
@@ -654,7 +654,7 @@ void test_vloxseg2ei64_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_tum(
@@ -667,7 +667,7 @@ void test_vloxseg2ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_tum(
@@ -680,7 +680,7 @@ void test_vloxseg2ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_tum(
@@ -693,7 +693,7 @@ void test_vloxseg2ei64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_tum(
@@ -706,7 +706,7 @@ void test_vloxseg2ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_tum(
@@ -719,7 +719,7 @@ void test_vloxseg2ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_tum(
@@ -732,7 +732,7 @@ void test_vloxseg2ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_tum(
@@ -745,7 +745,7 @@ void test_vloxseg2ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_tum(
@@ -758,7 +758,7 @@ void test_vloxseg2ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_tum(
@@ -771,7 +771,7 @@ void test_vloxseg2ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_tum(
@@ -784,7 +784,7 @@ void test_vloxseg2ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_tum(
@@ -797,7 +797,7 @@ void test_vloxseg2ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_tum(
@@ -810,7 +810,7 @@ void test_vloxseg2ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_tum(
@@ -823,7 +823,7 @@ void test_vloxseg2ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_tum(
@@ -836,7 +836,7 @@ void test_vloxseg2ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_tum(
@@ -849,7 +849,7 @@ void test_vloxseg2ei64_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_tum(
@@ -862,7 +862,7 @@ void test_vloxseg2ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_tum(
@@ -875,7 +875,7 @@ void test_vloxseg2ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_tum(
@@ -888,7 +888,7 @@ void test_vloxseg2ei64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_tum(
@@ -901,7 +901,7 @@ void test_vloxseg2ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_tum(
@@ -914,7 +914,7 @@ void test_vloxseg2ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_tum(
@@ -927,7 +927,7 @@ void test_vloxseg2ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_tum(
@@ -940,7 +940,7 @@ void test_vloxseg2ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_tum(
@@ -953,7 +953,7 @@ void test_vloxseg2ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_tum(
@@ -966,7 +966,7 @@ void test_vloxseg2ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_tum(
@@ -979,7 +979,7 @@ void test_vloxseg2ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_tum(
@@ -992,7 +992,7 @@ void test_vloxseg2ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_tum(
@@ -1005,7 +1005,7 @@ void test_vloxseg2ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_tum(
@@ -1018,7 +1018,7 @@ void test_vloxseg2ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_tum(
@@ -1031,7 +1031,7 @@ void test_vloxseg2ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_tum(
@@ -1044,7 +1044,7 @@ void test_vloxseg2ei64_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_tum(
@@ -1057,7 +1057,7 @@ void test_vloxseg2ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_tum(
@@ -1070,7 +1070,7 @@ void test_vloxseg2ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_tumu(
@@ -1083,7 +1083,7 @@ void test_vloxseg2ei64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_tumu(
@@ -1096,7 +1096,7 @@ void test_vloxseg2ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_tumu(
@@ -1109,7 +1109,7 @@ void test_vloxseg2ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_tumu(
@@ -1122,7 +1122,7 @@ void test_vloxseg2ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_tumu(
@@ -1135,7 +1135,7 @@ void test_vloxseg2ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_tumu(
@@ -1148,7 +1148,7 @@ void test_vloxseg2ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_tumu(
@@ -1161,7 +1161,7 @@ void test_vloxseg2ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_tumu(
@@ -1174,7 +1174,7 @@ void test_vloxseg2ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_tumu(
@@ -1187,7 +1187,7 @@ void test_vloxseg2ei64_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_tumu(
@@ -1200,7 +1200,7 @@ void test_vloxseg2ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_tumu(
@@ -1213,7 +1213,7 @@ void test_vloxseg2ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_tumu(
@@ -1226,7 +1226,7 @@ void test_vloxseg2ei64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_tumu(
@@ -1239,7 +1239,7 @@ void test_vloxseg2ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_tumu(
@@ -1252,7 +1252,7 @@ void test_vloxseg2ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_tumu(
@@ -1265,7 +1265,7 @@ void test_vloxseg2ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_tumu(
@@ -1278,7 +1278,7 @@ void test_vloxseg2ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_tumu(
@@ -1291,7 +1291,7 @@ void test_vloxseg2ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_tumu(
@@ -1304,7 +1304,7 @@ void test_vloxseg2ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_tumu(
@@ -1317,7 +1317,7 @@ void test_vloxseg2ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_tumu(
@@ -1330,7 +1330,7 @@ void test_vloxseg2ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_tumu(
@@ -1343,7 +1343,7 @@ void test_vloxseg2ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_tumu(
@@ -1356,7 +1356,7 @@ void test_vloxseg2ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg2ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_tumu(
@@ -1382,7 +1382,7 @@ void test_vloxseg2ei64_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_tumu(
@@ -1395,7 +1395,7 @@ void test_vloxseg2ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_tumu(
@@ -1408,7 +1408,7 @@ void test_vloxseg2ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_tumu(
@@ -1421,7 +1421,7 @@ void test_vloxseg2ei64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_tumu(
@@ -1434,7 +1434,7 @@ void test_vloxseg2ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_tumu(
@@ -1447,7 +1447,7 @@ void test_vloxseg2ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_tumu(
@@ -1460,7 +1460,7 @@ void test_vloxseg2ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_tumu(
@@ -1473,7 +1473,7 @@ void test_vloxseg2ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_tumu(
@@ -1486,7 +1486,7 @@ void test_vloxseg2ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vloxseg2ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_tumu(
@@ -1512,7 +1512,7 @@ void test_vloxseg2ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_tumu(
@@ -1525,7 +1525,7 @@ void test_vloxseg2ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_tumu(
@@ -1538,7 +1538,7 @@ void test_vloxseg2ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_tumu(
@@ -1551,7 +1551,7 @@ void test_vloxseg2ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_tumu(
@@ -1564,7 +1564,7 @@ void test_vloxseg2ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_tumu(
@@ -1577,7 +1577,7 @@ void test_vloxseg2ei64_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_tumu(
@@ -1590,7 +1590,7 @@ void test_vloxseg2ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_tumu(
@@ -1603,7 +1603,7 @@ void test_vloxseg2ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_mu(
@@ -1616,7 +1616,7 @@ void test_vloxseg2ei64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_mu(
@@ -1629,7 +1629,7 @@ void test_vloxseg2ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_mu(
@@ -1642,7 +1642,7 @@ void test_vloxseg2ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_mu(
@@ -1655,7 +1655,7 @@ void test_vloxseg2ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_mu(
@@ -1668,7 +1668,7 @@ void test_vloxseg2ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_mu(
@@ -1681,7 +1681,7 @@ void test_vloxseg2ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_mu(
@@ -1694,7 +1694,7 @@ void test_vloxseg2ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_mu(
@@ -1707,7 +1707,7 @@ void test_vloxseg2ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_mu(
@@ -1720,7 +1720,7 @@ void test_vloxseg2ei64_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_mu(
@@ -1733,7 +1733,7 @@ void test_vloxseg2ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_mu(
@@ -1746,7 +1746,7 @@ void test_vloxseg2ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_mu(
@@ -1759,7 +1759,7 @@ void test_vloxseg2ei64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_mu(
@@ -1772,7 +1772,7 @@ void test_vloxseg2ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_mu(
@@ -1785,7 +1785,7 @@ void test_vloxseg2ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_mu(
@@ -1798,7 +1798,7 @@ void test_vloxseg2ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_mu(
@@ -1811,7 +1811,7 @@ void test_vloxseg2ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_mu(
@@ -1824,7 +1824,7 @@ void test_vloxseg2ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_mu(
@@ -1837,7 +1837,7 @@ void test_vloxseg2ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_mu(
@@ -1850,7 +1850,7 @@ void test_vloxseg2ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_mu(
@@ -1863,7 +1863,7 @@ void test_vloxseg2ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_mu(
@@ -1876,7 +1876,7 @@ void test_vloxseg2ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_mu(
@@ -1889,7 +1889,7 @@ void test_vloxseg2ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_mu(
@@ -1902,7 +1902,7 @@ void test_vloxseg2ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_mu(
@@ -1915,7 +1915,7 @@ void test_vloxseg2ei64_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_mu(
@@ -1928,7 +1928,7 @@ void test_vloxseg2ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_mu(
@@ -1941,7 +1941,7 @@ void test_vloxseg2ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_mu(
@@ -1954,7 +1954,7 @@ void test_vloxseg2ei64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_mu(
@@ -1967,7 +1967,7 @@ void test_vloxseg2ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_mu(
@@ -1980,7 +1980,7 @@ void test_vloxseg2ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_mu(
@@ -1993,7 +1993,7 @@ void test_vloxseg2ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_mu(
@@ -2006,7 +2006,7 @@ void test_vloxseg2ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_mu(
@@ -2019,7 +2019,7 @@ void test_vloxseg2ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_mu(
@@ -2032,7 +2032,7 @@ void test_vloxseg2ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_mu(
@@ -2045,7 +2045,7 @@ void test_vloxseg2ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_mu(
@@ -2058,7 +2058,7 @@ void test_vloxseg2ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_mu(
@@ -2071,7 +2071,7 @@ void test_vloxseg2ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_mu(
@@ -2084,7 +2084,7 @@ void test_vloxseg2ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_mu(
@@ -2097,7 +2097,7 @@ void test_vloxseg2ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_mu(
@@ -2110,7 +2110,7 @@ void test_vloxseg2ei64_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_mu(
@@ -2123,7 +2123,7 @@ void test_vloxseg2ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_mu(
@@ -2136,6 +2136,6 @@ void test_vloxseg2ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
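
The pattern above repeats mechanically through every hunk of this patch: the overloaded callee gains the `__riscv_` prefix while its operands, overload resolution, and policy suffix (`_tu`/`_tum`/`_tumu`/`_mu`) stay unchanged. As a minimal sketch of the resulting user-facing spelling, reusing the `__riscv_vloxseg2ei64_mu` signature exercised by the tests above (the wrapper function and its name are illustrative only, not part of the patch):

#include <riscv_vector.h>

// Masked, mask-undisturbed (_mu) segment-2 indexed load. Before this patch
// the overloaded call was spelled vloxseg2ei64_mu(...); afterwards the same
// call takes the __riscv_ prefix, with the f64m1 variant still selected from
// the operand types.
void load_pair_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask,
                  vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1,
                  const double *base, vuint64m1_t bindex, size_t vl) {
  __riscv_vloxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1,
                          base, bindex, vl);
}
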
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c
index 0e46c7405411..bc01e555bb8a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vloxseg2ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vloxseg2ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vloxseg2ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_tu(
@@ -69,7 +69,7 @@ void test_vloxseg2ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_tu(
@@ -82,7 +82,7 @@ void test_vloxseg2ei8_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_tu(
@@ -95,7 +95,7 @@ void test_vloxseg2ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_tu(
@@ -108,7 +108,7 @@ void test_vloxseg2ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_tu(
@@ -121,7 +121,7 @@ void test_vloxseg2ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_tu(
@@ -134,7 +134,7 @@ void test_vloxseg2ei8_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_tu(
@@ -147,7 +147,7 @@ void test_vloxseg2ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_tu(
@@ -160,7 +160,7 @@ void test_vloxseg2ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_tu(
@@ -173,7 +173,7 @@ void test_vloxseg2ei8_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_tu(
@@ -186,7 +186,7 @@ void test_vloxseg2ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_tu(
@@ -199,7 +199,7 @@ void test_vloxseg2ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_tu(
@@ -212,7 +212,7 @@ void test_vloxseg2ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_tu(
@@ -225,7 +225,7 @@ void test_vloxseg2ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedof
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_tu(
@@ -238,7 +238,7 @@ void test_vloxseg2ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedof
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_tu(
@@ -251,7 +251,7 @@ void test_vloxseg2ei8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedof
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_tu(
@@ -264,7 +264,7 @@ void test_vloxseg2ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vloxseg2ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_tu(
@@ -290,7 +290,7 @@ void test_vloxseg2ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_tu(
@@ -303,7 +303,7 @@ void test_vloxseg2ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_tu(
@@ -316,7 +316,7 @@ void test_vloxseg2ei8_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_tu(
@@ -329,7 +329,7 @@ void test_vloxseg2ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_tu(
@@ -342,7 +342,7 @@ void test_vloxseg2ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_tu(
@@ -355,7 +355,7 @@ void test_vloxseg2ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_tu(
@@ -368,7 +368,7 @@ void test_vloxseg2ei8_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_tu(
@@ -381,7 +381,7 @@ void test_vloxseg2ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_tu(
@@ -394,7 +394,7 @@ void test_vloxseg2ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_tu(
@@ -407,7 +407,7 @@ void test_vloxseg2ei8_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_tu(
@@ -420,7 +420,7 @@ void test_vloxseg2ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_tu(
@@ -433,7 +433,7 @@ void test_vloxseg2ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_tu(
@@ -446,7 +446,7 @@ void test_vloxseg2ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_tu(
@@ -459,7 +459,7 @@ void test_vloxseg2ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_tu(
@@ -472,7 +472,7 @@ void test_vloxseg2ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_tu(
@@ -485,7 +485,7 @@ void test_vloxseg2ei8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_tu(
@@ -498,7 +498,7 @@ void test_vloxseg2ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_tu(
@@ -511,7 +511,7 @@ void test_vloxseg2ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_tu(
@@ -524,7 +524,7 @@ void test_vloxseg2ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_tu(
@@ -537,7 +537,7 @@ void test_vloxseg2ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_tu(
@@ -550,7 +550,7 @@ void test_vloxseg2ei8_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_tu(
@@ -563,7 +563,7 @@ void test_vloxseg2ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_tu(
@@ -576,7 +576,7 @@ void test_vloxseg2ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_tu(
@@ -589,7 +589,7 @@ void test_vloxseg2ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vloxseg2ei8_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_tu(
@@ -615,7 +615,7 @@ void test_vloxseg2ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_tu(
@@ -628,7 +628,7 @@ void test_vloxseg2ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_tum(
@@ -641,7 +641,7 @@ void test_vloxseg2ei8_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_tum(
@@ -654,7 +654,7 @@ void test_vloxseg2ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_tum(
@@ -667,7 +667,7 @@ void test_vloxseg2ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_tum(
@@ -680,7 +680,7 @@ void test_vloxseg2ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_tum(
@@ -693,7 +693,7 @@ void test_vloxseg2ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_tum(
@@ -706,7 +706,7 @@ void test_vloxseg2ei8_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_tum(
@@ -719,7 +719,7 @@ void test_vloxseg2ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_tum(
@@ -732,7 +732,7 @@ void test_vloxseg2ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_tum(
@@ -745,7 +745,7 @@ void test_vloxseg2ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_tum(
@@ -758,7 +758,7 @@ void test_vloxseg2ei8_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_tum(
@@ -771,7 +771,7 @@ void test_vloxseg2ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_tum(
@@ -784,7 +784,7 @@ void test_vloxseg2ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_tum(
@@ -797,7 +797,7 @@ void test_vloxseg2ei8_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_tum(
@@ -810,7 +810,7 @@ void test_vloxseg2ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_tum(
@@ -823,7 +823,7 @@ void test_vloxseg2ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_tum(
@@ -836,7 +836,7 @@ void test_vloxseg2ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_tum(
@@ -849,7 +849,7 @@ void test_vloxseg2ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_tum(
@@ -862,7 +862,7 @@ void test_vloxseg2ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_tum(
@@ -875,7 +875,7 @@ void test_vloxseg2ei8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_tum(
@@ -888,7 +888,7 @@ void test_vloxseg2ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vloxseg2ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_tum(
@@ -914,7 +914,7 @@ void test_vloxseg2ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_tum(
@@ -927,7 +927,7 @@ void test_vloxseg2ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_tum(
@@ -940,7 +940,7 @@ void test_vloxseg2ei8_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_tum(
@@ -953,7 +953,7 @@ void test_vloxseg2ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_tum(
@@ -966,7 +966,7 @@ void test_vloxseg2ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_tum(
@@ -979,7 +979,7 @@ void test_vloxseg2ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_tum(
@@ -992,7 +992,7 @@ void test_vloxseg2ei8_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_tum(
@@ -1005,7 +1005,7 @@ void test_vloxseg2ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_tum(
@@ -1018,7 +1018,7 @@ void test_vloxseg2ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_tum(
@@ -1031,7 +1031,7 @@ void test_vloxseg2ei8_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_tum(
@@ -1044,7 +1044,7 @@ void test_vloxseg2ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_tum(
@@ -1057,7 +1057,7 @@ void test_vloxseg2ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_tum(
@@ -1070,7 +1070,7 @@ void test_vloxseg2ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_tum(
@@ -1083,7 +1083,7 @@ void test_vloxseg2ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_tum(
@@ -1096,7 +1096,7 @@ void test_vloxseg2ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_tum(
@@ -1109,7 +1109,7 @@ void test_vloxseg2ei8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_tum(
@@ -1122,7 +1122,7 @@ void test_vloxseg2ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_tum(
@@ -1135,7 +1135,7 @@ void test_vloxseg2ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_tum(
@@ -1148,7 +1148,7 @@ void test_vloxseg2ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_tum(
@@ -1161,7 +1161,7 @@ void test_vloxseg2ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_tum(
@@ -1174,7 +1174,7 @@ void test_vloxseg2ei8_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_tum(
@@ -1187,7 +1187,7 @@ void test_vloxseg2ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_tum(
@@ -1200,7 +1200,7 @@ void test_vloxseg2ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_tum(
@@ -1213,7 +1213,7 @@ void test_vloxseg2ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_tum(
@@ -1226,7 +1226,7 @@ void test_vloxseg2ei8_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_tum(
@@ -1239,7 +1239,7 @@ void test_vloxseg2ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_tum(
@@ -1252,7 +1252,7 @@ void test_vloxseg2ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_tumu(
@@ -1265,7 +1265,7 @@ void test_vloxseg2ei8_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_tumu(
@@ -1278,7 +1278,7 @@ void test_vloxseg2ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_tumu(
@@ -1291,7 +1291,7 @@ void test_vloxseg2ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_tumu(
@@ -1304,7 +1304,7 @@ void test_vloxseg2ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_tumu(
@@ -1317,7 +1317,7 @@ void test_vloxseg2ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_tumu(
@@ -1330,7 +1330,7 @@ void test_vloxseg2ei8_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_tumu(
@@ -1343,7 +1343,7 @@ void test_vloxseg2ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_tumu(
@@ -1356,7 +1356,7 @@ void test_vloxseg2ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg2ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_tumu(
@@ -1382,7 +1382,7 @@ void test_vloxseg2ei8_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_tumu(
@@ -1395,7 +1395,7 @@ void test_vloxseg2ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_tumu(
@@ -1408,7 +1408,7 @@ void test_vloxseg2ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_tumu(
@@ -1421,7 +1421,7 @@ void test_vloxseg2ei8_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_tumu(
@@ -1434,7 +1434,7 @@ void test_vloxseg2ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_tumu(
@@ -1447,7 +1447,7 @@ void test_vloxseg2ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_tumu(
@@ -1460,7 +1460,7 @@ void test_vloxseg2ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_tumu(
@@ -1473,7 +1473,7 @@ void test_vloxseg2ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_tumu(
@@ -1486,7 +1486,7 @@ void test_vloxseg2ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_tumu(
@@ -1499,7 +1499,7 @@ void test_vloxseg2ei8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_tumu(
@@ -1512,7 +1512,7 @@ void test_vloxseg2ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_tumu(
@@ -1525,7 +1525,7 @@ void test_vloxseg2ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_tumu(
@@ -1538,7 +1538,7 @@ void test_vloxseg2ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_tumu(
@@ -1551,7 +1551,7 @@ void test_vloxseg2ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vloxseg2ei8_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_tumu(
@@ -1577,7 +1577,7 @@ void test_vloxseg2ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_tumu(
@@ -1590,7 +1590,7 @@ void test_vloxseg2ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_tumu(
@@ -1603,7 +1603,7 @@ void test_vloxseg2ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_tumu(
@@ -1616,7 +1616,7 @@ void test_vloxseg2ei8_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_tumu(
@@ -1629,7 +1629,7 @@ void test_vloxseg2ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_tumu(
@@ -1642,7 +1642,7 @@ void test_vloxseg2ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_tumu(
@@ -1655,7 +1655,7 @@ void test_vloxseg2ei8_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_tumu(
@@ -1668,7 +1668,7 @@ void test_vloxseg2ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_tumu(
@@ -1681,7 +1681,7 @@ void test_vloxseg2ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_tumu(
@@ -1694,7 +1694,7 @@ void test_vloxseg2ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_tumu(
@@ -1707,7 +1707,7 @@ void test_vloxseg2ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_tumu(
@@ -1720,7 +1720,7 @@ void test_vloxseg2ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_tumu(
@@ -1733,7 +1733,7 @@ void test_vloxseg2ei8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_tumu(
@@ -1746,7 +1746,7 @@ void test_vloxseg2ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_tumu(
@@ -1759,7 +1759,7 @@ void test_vloxseg2ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_tumu(
@@ -1772,7 +1772,7 @@ void test_vloxseg2ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_tumu(
@@ -1785,7 +1785,7 @@ void test_vloxseg2ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_tumu(
@@ -1798,7 +1798,7 @@ void test_vloxseg2ei8_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_tumu(
@@ -1811,7 +1811,7 @@ void test_vloxseg2ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_tumu(
@@ -1824,7 +1824,7 @@ void test_vloxseg2ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_tumu(
@@ -1837,7 +1837,7 @@ void test_vloxseg2ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_tumu(
@@ -1850,7 +1850,7 @@ void test_vloxseg2ei8_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_tumu(
@@ -1863,7 +1863,7 @@ void test_vloxseg2ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_tumu(
@@ -1876,7 +1876,7 @@ void test_vloxseg2ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_mu(
@@ -1889,7 +1889,7 @@ void test_vloxseg2ei8_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_mu(
@@ -1902,7 +1902,7 @@ void test_vloxseg2ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vloxseg2ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_mu(
@@ -1928,7 +1928,7 @@ void test_vloxseg2ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_mu(
@@ -1941,7 +1941,7 @@ void test_vloxseg2ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vloxseg2ei8_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_mu(
@@ -1967,7 +1967,7 @@ void test_vloxseg2ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_mu(
@@ -1980,7 +1980,7 @@ void test_vloxseg2ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_mu(
@@ -1993,7 +1993,7 @@ void test_vloxseg2ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_mu(
@@ -2006,7 +2006,7 @@ void test_vloxseg2ei8_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_mu(
@@ -2019,7 +2019,7 @@ void test_vloxseg2ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_mu(
@@ -2032,7 +2032,7 @@ void test_vloxseg2ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_mu(
@@ -2045,7 +2045,7 @@ void test_vloxseg2ei8_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_mu(
@@ -2058,7 +2058,7 @@ void test_vloxseg2ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_mu(
@@ -2071,7 +2071,7 @@ void test_vloxseg2ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_mu(
@@ -2084,7 +2084,7 @@ void test_vloxseg2ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_mu(
@@ -2097,7 +2097,7 @@ void test_vloxseg2ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_mu(
@@ -2110,7 +2110,7 @@ void test_vloxseg2ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_mu(
@@ -2123,7 +2123,7 @@ void test_vloxseg2ei8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_mu(
@@ -2136,7 +2136,7 @@ void test_vloxseg2ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_mu(
@@ -2149,7 +2149,7 @@ void test_vloxseg2ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_mu(
@@ -2162,7 +2162,7 @@ void test_vloxseg2ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_mu(
@@ -2175,7 +2175,7 @@ void test_vloxseg2ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_mu(
@@ -2188,7 +2188,7 @@ void test_vloxseg2ei8_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_mu(
@@ -2201,7 +2201,7 @@ void test_vloxseg2ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_mu(
@@ -2214,7 +2214,7 @@ void test_vloxseg2ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_mu(
@@ -2227,7 +2227,7 @@ void test_vloxseg2ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_mu(
@@ -2240,7 +2240,7 @@ void test_vloxseg2ei8_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_mu(
@@ -2253,7 +2253,7 @@ void test_vloxseg2ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_mu(
@@ -2266,7 +2266,7 @@ void test_vloxseg2ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_mu(
@@ -2279,7 +2279,7 @@ void test_vloxseg2ei8_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_mu(
@@ -2292,7 +2292,7 @@ void test_vloxseg2ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_mu(
@@ -2305,7 +2305,7 @@ void test_vloxseg2ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_mu(
@@ -2318,7 +2318,7 @@ void test_vloxseg2ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_mu(
@@ -2331,7 +2331,7 @@ void test_vloxseg2ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_mu(
@@ -2344,7 +2344,7 @@ void test_vloxseg2ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_mu(
@@ -2357,7 +2357,7 @@ void test_vloxseg2ei8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_mu(
@@ -2370,7 +2370,7 @@ void test_vloxseg2ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_mu(
@@ -2383,7 +2383,7 @@ void test_vloxseg2ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_mu(
@@ -2396,7 +2396,7 @@ void test_vloxseg2ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_mu(
@@ -2409,7 +2409,7 @@ void test_vloxseg2ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_mu(
@@ -2422,7 +2422,7 @@ void test_vloxseg2ei8_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_mu(
@@ -2435,7 +2435,7 @@ void test_vloxseg2ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_mu(
@@ -2448,7 +2448,7 @@ void test_vloxseg2ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_mu(
@@ -2461,7 +2461,7 @@ void test_vloxseg2ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_mu(
@@ -2474,7 +2474,7 @@ void test_vloxseg2ei8_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_mu(
@@ -2487,7 +2487,7 @@ void test_vloxseg2ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_mu(
@@ -2500,6 +2500,6 @@ void test_vloxseg2ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vloxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c
index 7318b2ab77ba..b3beba0bf075 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vloxseg3ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vloxseg3ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vloxseg3ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_tu(
@@ -79,7 +79,7 @@ void test_vloxseg3ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_tu(
@@ -94,7 +94,7 @@ void test_vloxseg3ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_tu(
@@ -109,7 +109,7 @@ void test_vloxseg3ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_tu(
@@ -124,7 +124,7 @@ void test_vloxseg3ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_tu(
@@ -139,7 +139,7 @@ void test_vloxseg3ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_tu(
@@ -154,7 +154,7 @@ void test_vloxseg3ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_tu(
@@ -169,7 +169,7 @@ void test_vloxseg3ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_tu(
@@ -184,7 +184,7 @@ void test_vloxseg3ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vloxseg3ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_tu(
@@ -214,7 +214,7 @@ void test_vloxseg3ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_tu(
@@ -229,7 +229,7 @@ void test_vloxseg3ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_tu(
@@ -244,7 +244,7 @@ void test_vloxseg3ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_tu(
@@ -259,7 +259,7 @@ void test_vloxseg3ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_tu(
@@ -274,7 +274,7 @@ void test_vloxseg3ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_tu(
@@ -289,7 +289,7 @@ void test_vloxseg3ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_tu(
@@ -304,7 +304,7 @@ void test_vloxseg3ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_tu(
@@ -319,7 +319,7 @@ void test_vloxseg3ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_tu(
@@ -334,7 +334,7 @@ void test_vloxseg3ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_tu(
@@ -349,7 +349,7 @@ void test_vloxseg3ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_tu(
@@ -364,7 +364,7 @@ void test_vloxseg3ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_tu(
@@ -379,7 +379,7 @@ void test_vloxseg3ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_tu(
@@ -394,7 +394,7 @@ void test_vloxseg3ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_tu(
@@ -409,7 +409,7 @@ void test_vloxseg3ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_tu(
@@ -424,7 +424,7 @@ void test_vloxseg3ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_tu(
@@ -439,7 +439,7 @@ void test_vloxseg3ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_tu(
@@ -454,7 +454,7 @@ void test_vloxseg3ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_tu(
@@ -469,7 +469,7 @@ void test_vloxseg3ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_tu(
@@ -484,7 +484,7 @@ void test_vloxseg3ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_tu(
@@ -499,7 +499,7 @@ void test_vloxseg3ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_tu(
@@ -514,7 +514,7 @@ void test_vloxseg3ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_tu(
@@ -529,7 +529,7 @@ void test_vloxseg3ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_tu(
@@ -544,7 +544,7 @@ void test_vloxseg3ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_tu(
@@ -559,7 +559,7 @@ void test_vloxseg3ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_tum(
@@ -574,7 +574,7 @@ void test_vloxseg3ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_tum(
@@ -589,7 +589,7 @@ void test_vloxseg3ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_tum(
@@ -604,7 +604,7 @@ void test_vloxseg3ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_tum(
@@ -619,7 +619,7 @@ void test_vloxseg3ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vloxseg3ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_tum(
@@ -649,7 +649,7 @@ void test_vloxseg3ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_tum(
@@ -664,7 +664,7 @@ void test_vloxseg3ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_tum(
@@ -679,7 +679,7 @@ void test_vloxseg3ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_tum(
@@ -694,7 +694,7 @@ void test_vloxseg3ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_tum(
@@ -709,7 +709,7 @@ void test_vloxseg3ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_tum(
@@ -724,7 +724,7 @@ void test_vloxseg3ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vloxseg3ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_tum(
@@ -754,7 +754,7 @@ void test_vloxseg3ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_tum(
@@ -769,7 +769,7 @@ void test_vloxseg3ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_tum(
@@ -784,7 +784,7 @@ void test_vloxseg3ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_tum(
@@ -799,7 +799,7 @@ void test_vloxseg3ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_tum(
@@ -814,7 +814,7 @@ void test_vloxseg3ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_tum(
@@ -829,7 +829,7 @@ void test_vloxseg3ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vloxseg3ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_tum(
@@ -859,7 +859,7 @@ void test_vloxseg3ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_tum(
@@ -874,7 +874,7 @@ void test_vloxseg3ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_tum(
@@ -889,7 +889,7 @@ void test_vloxseg3ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_tum(
@@ -904,7 +904,7 @@ void test_vloxseg3ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_tum(
@@ -919,7 +919,7 @@ void test_vloxseg3ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_tum(
@@ -934,7 +934,7 @@ void test_vloxseg3ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vloxseg3ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_tum(
@@ -964,7 +964,7 @@ void test_vloxseg3ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_tum(
@@ -979,7 +979,7 @@ void test_vloxseg3ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_tum(
@@ -994,7 +994,7 @@ void test_vloxseg3ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_tum(
@@ -1009,7 +1009,7 @@ void test_vloxseg3ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_tum(
@@ -1024,7 +1024,7 @@ void test_vloxseg3ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_tum(
@@ -1039,7 +1039,7 @@ void test_vloxseg3ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg3ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_tum(
@@ -1069,7 +1069,7 @@ void test_vloxseg3ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_tum(
@@ -1084,7 +1084,7 @@ void test_vloxseg3ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_tum(
@@ -1099,7 +1099,7 @@ void test_vloxseg3ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_tum(
@@ -1114,7 +1114,7 @@ void test_vloxseg3ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_tumu(
@@ -1129,7 +1129,7 @@ void test_vloxseg3ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_tumu(
@@ -1144,7 +1144,7 @@ void test_vloxseg3ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vloxseg3ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_tumu(
@@ -1174,7 +1174,7 @@ void test_vloxseg3ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_tumu(
@@ -1189,7 +1189,7 @@ void test_vloxseg3ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_tumu(
@@ -1204,7 +1204,7 @@ void test_vloxseg3ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_tumu(
@@ -1219,7 +1219,7 @@ void test_vloxseg3ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_tumu(
@@ -1234,7 +1234,7 @@ void test_vloxseg3ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_tumu(
@@ -1249,7 +1249,7 @@ void test_vloxseg3ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_tumu(
@@ -1264,7 +1264,7 @@ void test_vloxseg3ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vloxseg3ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_tumu(
@@ -1294,7 +1294,7 @@ void test_vloxseg3ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_tumu(
@@ -1309,7 +1309,7 @@ void test_vloxseg3ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_tumu(
@@ -1324,7 +1324,7 @@ void test_vloxseg3ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_tumu(
@@ -1339,7 +1339,7 @@ void test_vloxseg3ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vloxseg3ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg3ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_tumu(
@@ -1384,7 +1384,7 @@ void test_vloxseg3ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_tumu(
@@ -1399,7 +1399,7 @@ void test_vloxseg3ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_tumu(
@@ -1414,7 +1414,7 @@ void test_vloxseg3ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg3ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_tumu(
@@ -1444,7 +1444,7 @@ void test_vloxseg3ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_tumu(
@@ -1459,7 +1459,7 @@ void test_vloxseg3ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_tumu(
@@ -1474,7 +1474,7 @@ void test_vloxseg3ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_tumu(
@@ -1489,7 +1489,7 @@ void test_vloxseg3ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_tumu(
@@ -1504,7 +1504,7 @@ void test_vloxseg3ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_tumu(
@@ -1519,7 +1519,7 @@ void test_vloxseg3ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_tumu(
@@ -1534,7 +1534,7 @@ void test_vloxseg3ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_tumu(
@@ -1549,7 +1549,7 @@ void test_vloxseg3ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vloxseg3ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg3ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_tumu(
@@ -1594,7 +1594,7 @@ void test_vloxseg3ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_tumu(
@@ -1609,7 +1609,7 @@ void test_vloxseg3ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_tumu(
@@ -1624,7 +1624,7 @@ void test_vloxseg3ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_tumu(
@@ -1639,7 +1639,7 @@ void test_vloxseg3ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_tumu(
@@ -1654,7 +1654,7 @@ void test_vloxseg3ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_tumu(
@@ -1669,7 +1669,7 @@ void test_vloxseg3ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_mu(
@@ -1684,7 +1684,7 @@ void test_vloxseg3ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_mu(
@@ -1699,7 +1699,7 @@ void test_vloxseg3ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_mu(
@@ -1714,7 +1714,7 @@ void test_vloxseg3ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_mu(
@@ -1729,7 +1729,7 @@ void test_vloxseg3ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_mu(
@@ -1744,7 +1744,7 @@ void test_vloxseg3ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_mu(
@@ -1759,7 +1759,7 @@ void test_vloxseg3ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_mu(
@@ -1774,7 +1774,7 @@ void test_vloxseg3ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_mu(
@@ -1789,7 +1789,7 @@ void test_vloxseg3ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_mu(
@@ -1804,7 +1804,7 @@ void test_vloxseg3ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_mu(
@@ -1819,7 +1819,7 @@ void test_vloxseg3ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_mu(
@@ -1834,7 +1834,7 @@ void test_vloxseg3ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_mu(
@@ -1849,7 +1849,7 @@ void test_vloxseg3ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_mu(
@@ -1864,7 +1864,7 @@ void test_vloxseg3ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_mu(
@@ -1879,7 +1879,7 @@ void test_vloxseg3ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_mu(
@@ -1894,7 +1894,7 @@ void test_vloxseg3ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_mu(
@@ -1909,7 +1909,7 @@ void test_vloxseg3ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_mu(
@@ -1924,7 +1924,7 @@ void test_vloxseg3ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_mu(
@@ -1939,7 +1939,7 @@ void test_vloxseg3ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vloxseg3ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_mu(
@@ -1969,7 +1969,7 @@ void test_vloxseg3ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_mu(
@@ -1984,7 +1984,7 @@ void test_vloxseg3ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_mu(
@@ -1999,7 +1999,7 @@ void test_vloxseg3ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_mu(
@@ -2014,7 +2014,7 @@ void test_vloxseg3ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_mu(
@@ -2029,7 +2029,7 @@ void test_vloxseg3ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_mu(
@@ -2044,7 +2044,7 @@ void test_vloxseg3ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_mu(
@@ -2059,7 +2059,7 @@ void test_vloxseg3ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_mu(
@@ -2074,7 +2074,7 @@ void test_vloxseg3ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_mu(
@@ -2089,7 +2089,7 @@ void test_vloxseg3ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg3ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_mu(
@@ -2119,7 +2119,7 @@ void test_vloxseg3ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_mu(
@@ -2134,7 +2134,7 @@ void test_vloxseg3ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_mu(
@@ -2149,7 +2149,7 @@ void test_vloxseg3ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_mu(
@@ -2164,7 +2164,7 @@ void test_vloxseg3ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_mu(
@@ -2179,7 +2179,7 @@ void test_vloxseg3ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_mu(
@@ -2194,7 +2194,7 @@ void test_vloxseg3ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_mu(
@@ -2209,7 +2209,7 @@ void test_vloxseg3ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_mu(
@@ -2224,6 +2224,6 @@ void test_vloxseg3ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c
index 6d7153cd3b8b..e638a7be5020 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vloxseg3ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vloxseg3ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vloxseg3ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_tu(
@@ -79,7 +79,7 @@ void test_vloxseg3ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_tu(
@@ -94,7 +94,7 @@ void test_vloxseg3ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_tu(
@@ -109,7 +109,7 @@ void test_vloxseg3ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_tu(
@@ -124,7 +124,7 @@ void test_vloxseg3ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_tu(
@@ -139,7 +139,7 @@ void test_vloxseg3ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_tu(
@@ -154,7 +154,7 @@ void test_vloxseg3ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_tu(
@@ -169,7 +169,7 @@ void test_vloxseg3ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_tu(
@@ -184,7 +184,7 @@ void test_vloxseg3ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vloxseg3ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_tu(
@@ -214,7 +214,7 @@ void test_vloxseg3ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_tu(
@@ -229,7 +229,7 @@ void test_vloxseg3ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_tu(
@@ -244,7 +244,7 @@ void test_vloxseg3ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_tu(
@@ -259,7 +259,7 @@ void test_vloxseg3ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_tu(
@@ -274,7 +274,7 @@ void test_vloxseg3ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_tu(
@@ -289,7 +289,7 @@ void test_vloxseg3ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_tu(
@@ -304,7 +304,7 @@ void test_vloxseg3ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_tu(
@@ -319,7 +319,7 @@ void test_vloxseg3ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_tu(
@@ -334,7 +334,7 @@ void test_vloxseg3ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_tu(
@@ -349,7 +349,7 @@ void test_vloxseg3ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_tu(
@@ -364,7 +364,7 @@ void test_vloxseg3ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_tu(
@@ -379,7 +379,7 @@ void test_vloxseg3ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_tu(
@@ -394,7 +394,7 @@ void test_vloxseg3ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_tu(
@@ -409,7 +409,7 @@ void test_vloxseg3ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_tu(
@@ -424,7 +424,7 @@ void test_vloxseg3ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_tu(
@@ -439,7 +439,7 @@ void test_vloxseg3ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_tu(
@@ -454,7 +454,7 @@ void test_vloxseg3ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_tu(
@@ -469,7 +469,7 @@ void test_vloxseg3ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_tu(
@@ -484,7 +484,7 @@ void test_vloxseg3ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_tu(
@@ -499,7 +499,7 @@ void test_vloxseg3ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_tu(
@@ -514,7 +514,7 @@ void test_vloxseg3ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_tu(
@@ -529,7 +529,7 @@ void test_vloxseg3ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_tu(
@@ -544,7 +544,7 @@ void test_vloxseg3ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_tu(
@@ -559,7 +559,7 @@ void test_vloxseg3ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_tum(
@@ -574,7 +574,7 @@ void test_vloxseg3ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_tum(
@@ -589,7 +589,7 @@ void test_vloxseg3ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_tum(
@@ -604,7 +604,7 @@ void test_vloxseg3ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_tum(
@@ -619,7 +619,7 @@ void test_vloxseg3ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vloxseg3ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_tum(
@@ -649,7 +649,7 @@ void test_vloxseg3ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_tum(
@@ -664,7 +664,7 @@ void test_vloxseg3ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_tum(
@@ -679,7 +679,7 @@ void test_vloxseg3ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_tum(
@@ -694,7 +694,7 @@ void test_vloxseg3ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_tum(
@@ -709,7 +709,7 @@ void test_vloxseg3ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_tum(
@@ -724,7 +724,7 @@ void test_vloxseg3ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vloxseg3ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_tum(
@@ -754,7 +754,7 @@ void test_vloxseg3ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_tum(
@@ -769,7 +769,7 @@ void test_vloxseg3ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_tum(
@@ -784,7 +784,7 @@ void test_vloxseg3ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_tum(
@@ -799,7 +799,7 @@ void test_vloxseg3ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_tum(
@@ -814,7 +814,7 @@ void test_vloxseg3ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_tum(
@@ -829,7 +829,7 @@ void test_vloxseg3ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vloxseg3ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_tum(
@@ -859,7 +859,7 @@ void test_vloxseg3ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_tum(
@@ -874,7 +874,7 @@ void test_vloxseg3ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_tum(
@@ -889,7 +889,7 @@ void test_vloxseg3ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_tum(
@@ -904,7 +904,7 @@ void test_vloxseg3ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_tum(
@@ -919,7 +919,7 @@ void test_vloxseg3ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_tum(
@@ -934,7 +934,7 @@ void test_vloxseg3ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vloxseg3ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_tum(
@@ -964,7 +964,7 @@ void test_vloxseg3ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_tum(
@@ -979,7 +979,7 @@ void test_vloxseg3ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_tum(
@@ -994,7 +994,7 @@ void test_vloxseg3ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_tum(
@@ -1009,7 +1009,7 @@ void test_vloxseg3ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_tum(
@@ -1024,7 +1024,7 @@ void test_vloxseg3ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_tum(
@@ -1039,7 +1039,7 @@ void test_vloxseg3ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg3ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_tum(
@@ -1069,7 +1069,7 @@ void test_vloxseg3ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_tum(
@@ -1084,7 +1084,7 @@ void test_vloxseg3ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_tum(
@@ -1099,7 +1099,7 @@ void test_vloxseg3ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_tum(
@@ -1114,7 +1114,7 @@ void test_vloxseg3ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_tumu(
@@ -1129,7 +1129,7 @@ void test_vloxseg3ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_tumu(
@@ -1144,7 +1144,7 @@ void test_vloxseg3ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vloxseg3ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_tumu(
@@ -1174,7 +1174,7 @@ void test_vloxseg3ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_tumu(
@@ -1189,7 +1189,7 @@ void test_vloxseg3ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_tumu(
@@ -1204,7 +1204,7 @@ void test_vloxseg3ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_tumu(
@@ -1219,7 +1219,7 @@ void test_vloxseg3ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_tumu(
@@ -1234,7 +1234,7 @@ void test_vloxseg3ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_tumu(
@@ -1249,7 +1249,7 @@ void test_vloxseg3ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_tumu(
@@ -1264,7 +1264,7 @@ void test_vloxseg3ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vloxseg3ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_tumu(
@@ -1294,7 +1294,7 @@ void test_vloxseg3ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_tumu(
@@ -1309,7 +1309,7 @@ void test_vloxseg3ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_tumu(
@@ -1324,7 +1324,7 @@ void test_vloxseg3ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_tumu(
@@ -1339,7 +1339,7 @@ void test_vloxseg3ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vloxseg3ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg3ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_tumu(
@@ -1384,7 +1384,7 @@ void test_vloxseg3ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_tumu(
@@ -1399,7 +1399,7 @@ void test_vloxseg3ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_tumu(
@@ -1414,7 +1414,7 @@ void test_vloxseg3ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg3ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_tumu(
@@ -1444,7 +1444,7 @@ void test_vloxseg3ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_tumu(
@@ -1459,7 +1459,7 @@ void test_vloxseg3ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_tumu(
@@ -1474,7 +1474,7 @@ void test_vloxseg3ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_tumu(
@@ -1489,7 +1489,7 @@ void test_vloxseg3ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_tumu(
@@ -1504,7 +1504,7 @@ void test_vloxseg3ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_tumu(
@@ -1519,7 +1519,7 @@ void test_vloxseg3ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_tumu(
@@ -1534,7 +1534,7 @@ void test_vloxseg3ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_tumu(
@@ -1549,7 +1549,7 @@ void test_vloxseg3ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vloxseg3ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg3ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_tumu(
@@ -1594,7 +1594,7 @@ void test_vloxseg3ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_tumu(
@@ -1609,7 +1609,7 @@ void test_vloxseg3ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_tumu(
@@ -1624,7 +1624,7 @@ void test_vloxseg3ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_tumu(
@@ -1639,7 +1639,7 @@ void test_vloxseg3ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_tumu(
@@ -1654,7 +1654,7 @@ void test_vloxseg3ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_tumu(
@@ -1669,7 +1669,7 @@ void test_vloxseg3ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_mu(
@@ -1684,7 +1684,7 @@ void test_vloxseg3ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_mu(
@@ -1699,7 +1699,7 @@ void test_vloxseg3ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_mu(
@@ -1714,7 +1714,7 @@ void test_vloxseg3ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_mu(
@@ -1729,7 +1729,7 @@ void test_vloxseg3ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_mu(
@@ -1744,7 +1744,7 @@ void test_vloxseg3ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_mu(
@@ -1759,7 +1759,7 @@ void test_vloxseg3ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_mu(
@@ -1774,7 +1774,7 @@ void test_vloxseg3ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_mu(
@@ -1789,7 +1789,7 @@ void test_vloxseg3ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_mu(
@@ -1804,7 +1804,7 @@ void test_vloxseg3ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_mu(
@@ -1819,7 +1819,7 @@ void test_vloxseg3ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_mu(
@@ -1834,7 +1834,7 @@ void test_vloxseg3ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_mu(
@@ -1849,7 +1849,7 @@ void test_vloxseg3ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_mu(
@@ -1864,7 +1864,7 @@ void test_vloxseg3ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_mu(
@@ -1879,7 +1879,7 @@ void test_vloxseg3ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_mu(
@@ -1894,7 +1894,7 @@ void test_vloxseg3ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_mu(
@@ -1909,7 +1909,7 @@ void test_vloxseg3ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_mu(
@@ -1924,7 +1924,7 @@ void test_vloxseg3ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_mu(
@@ -1939,7 +1939,7 @@ void test_vloxseg3ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vloxseg3ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_mu(
@@ -1969,7 +1969,7 @@ void test_vloxseg3ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_mu(
@@ -1984,7 +1984,7 @@ void test_vloxseg3ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_mu(
@@ -1999,7 +1999,7 @@ void test_vloxseg3ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_mu(
@@ -2014,7 +2014,7 @@ void test_vloxseg3ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_mu(
@@ -2029,7 +2029,7 @@ void test_vloxseg3ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_mu(
@@ -2044,7 +2044,7 @@ void test_vloxseg3ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_mu(
@@ -2059,7 +2059,7 @@ void test_vloxseg3ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_mu(
@@ -2074,7 +2074,7 @@ void test_vloxseg3ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_mu(
@@ -2089,7 +2089,7 @@ void test_vloxseg3ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg3ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_mu(
@@ -2119,7 +2119,7 @@ void test_vloxseg3ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_mu(
@@ -2134,7 +2134,7 @@ void test_vloxseg3ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_mu(
@@ -2149,7 +2149,7 @@ void test_vloxseg3ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_mu(
@@ -2164,7 +2164,7 @@ void test_vloxseg3ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_mu(
@@ -2179,7 +2179,7 @@ void test_vloxseg3ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_mu(
@@ -2194,7 +2194,7 @@ void test_vloxseg3ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_mu(
@@ -2209,7 +2209,7 @@ void test_vloxseg3ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_mu(
@@ -2224,6 +2224,6 @@ void test_vloxseg3ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c
index 6c02876d5707..9258f7ab7bb3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vloxseg3ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vloxseg3ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vloxseg3ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_tu(
@@ -79,7 +79,7 @@ void test_vloxseg3ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_tu(
@@ -94,7 +94,7 @@ void test_vloxseg3ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_tu(
@@ -109,7 +109,7 @@ void test_vloxseg3ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_tu(
@@ -124,7 +124,7 @@ void test_vloxseg3ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_tu(
@@ -139,7 +139,7 @@ void test_vloxseg3ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_tu(
@@ -154,7 +154,7 @@ void test_vloxseg3ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_tu(
@@ -169,7 +169,7 @@ void test_vloxseg3ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_tu(
@@ -184,7 +184,7 @@ void test_vloxseg3ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vloxseg3ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_tu(
@@ -214,7 +214,7 @@ void test_vloxseg3ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_tu(
@@ -229,7 +229,7 @@ void test_vloxseg3ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_tu(
@@ -244,7 +244,7 @@ void test_vloxseg3ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_tu(
@@ -259,7 +259,7 @@ void test_vloxseg3ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_tu(
@@ -274,7 +274,7 @@ void test_vloxseg3ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vloxseg3ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_tu(
@@ -304,7 +304,7 @@ void test_vloxseg3ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_tu(
@@ -319,7 +319,7 @@ void test_vloxseg3ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_tu(
@@ -334,7 +334,7 @@ void test_vloxseg3ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_tu(
@@ -349,7 +349,7 @@ void test_vloxseg3ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_tu(
@@ -364,7 +364,7 @@ void test_vloxseg3ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_tu(
@@ -379,7 +379,7 @@ void test_vloxseg3ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_tu(
@@ -394,7 +394,7 @@ void test_vloxseg3ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_tu(
@@ -409,7 +409,7 @@ void test_vloxseg3ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_tu(
@@ -424,7 +424,7 @@ void test_vloxseg3ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_tu(
@@ -439,7 +439,7 @@ void test_vloxseg3ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_tu(
@@ -454,7 +454,7 @@ void test_vloxseg3ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_tu(
@@ -469,7 +469,7 @@ void test_vloxseg3ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_tu(
@@ -484,7 +484,7 @@ void test_vloxseg3ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_tu(
@@ -499,7 +499,7 @@ void test_vloxseg3ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_tu(
@@ -514,7 +514,7 @@ void test_vloxseg3ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_tu(
@@ -529,7 +529,7 @@ void test_vloxseg3ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_tum(
@@ -544,7 +544,7 @@ void test_vloxseg3ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_tum(
@@ -559,7 +559,7 @@ void test_vloxseg3ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_tum(
@@ -574,7 +574,7 @@ void test_vloxseg3ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_tum(
@@ -589,7 +589,7 @@ void test_vloxseg3ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_tum(
@@ -604,7 +604,7 @@ void test_vloxseg3ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_tum(
@@ -619,7 +619,7 @@ void test_vloxseg3ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_tum(
@@ -634,7 +634,7 @@ void test_vloxseg3ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_tum(
@@ -649,7 +649,7 @@ void test_vloxseg3ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_tum(
@@ -664,7 +664,7 @@ void test_vloxseg3ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_tum(
@@ -679,7 +679,7 @@ void test_vloxseg3ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_tum(
@@ -694,7 +694,7 @@ void test_vloxseg3ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_tum(
@@ -709,7 +709,7 @@ void test_vloxseg3ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_tum(
@@ -724,7 +724,7 @@ void test_vloxseg3ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_tum(
@@ -739,7 +739,7 @@ void test_vloxseg3ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_tum(
@@ -754,7 +754,7 @@ void test_vloxseg3ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_tum(
@@ -769,7 +769,7 @@ void test_vloxseg3ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_tum(
@@ -784,7 +784,7 @@ void test_vloxseg3ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_tum(
@@ -799,7 +799,7 @@ void test_vloxseg3ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_tum(
@@ -814,7 +814,7 @@ void test_vloxseg3ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_tum(
@@ -829,7 +829,7 @@ void test_vloxseg3ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_tum(
@@ -844,7 +844,7 @@ void test_vloxseg3ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_tum(
@@ -859,7 +859,7 @@ void test_vloxseg3ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_tum(
@@ -874,7 +874,7 @@ void test_vloxseg3ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_tum(
@@ -889,7 +889,7 @@ void test_vloxseg3ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_tum(
@@ -904,7 +904,7 @@ void test_vloxseg3ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_tum(
@@ -919,7 +919,7 @@ void test_vloxseg3ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_tum(
@@ -934,7 +934,7 @@ void test_vloxseg3ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_tum(
@@ -949,7 +949,7 @@ void test_vloxseg3ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_tum(
@@ -964,7 +964,7 @@ void test_vloxseg3ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_tum(
@@ -979,7 +979,7 @@ void test_vloxseg3ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_tum(
@@ -994,7 +994,7 @@ void test_vloxseg3ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_tum(
@@ -1009,7 +1009,7 @@ void test_vloxseg3ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_tum(
@@ -1024,7 +1024,7 @@ void test_vloxseg3ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_tum(
@@ -1039,7 +1039,7 @@ void test_vloxseg3ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg3ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_tumu(
@@ -1069,7 +1069,7 @@ void test_vloxseg3ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_tumu(
@@ -1084,7 +1084,7 @@ void test_vloxseg3ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_tumu(
@@ -1099,7 +1099,7 @@ void test_vloxseg3ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_tumu(
@@ -1114,7 +1114,7 @@ void test_vloxseg3ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_tumu(
@@ -1129,7 +1129,7 @@ void test_vloxseg3ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_tumu(
@@ -1144,7 +1144,7 @@ void test_vloxseg3ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_tumu(
@@ -1159,7 +1159,7 @@ void test_vloxseg3ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_tumu(
@@ -1174,7 +1174,7 @@ void test_vloxseg3ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_tumu(
@@ -1189,7 +1189,7 @@ void test_vloxseg3ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_tumu(
@@ -1204,7 +1204,7 @@ void test_vloxseg3ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_tumu(
@@ -1219,7 +1219,7 @@ void test_vloxseg3ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_tumu(
@@ -1234,7 +1234,7 @@ void test_vloxseg3ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_tumu(
@@ -1249,7 +1249,7 @@ void test_vloxseg3ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vloxseg3ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_tumu(
@@ -1279,7 +1279,7 @@ void test_vloxseg3ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_tumu(
@@ -1294,7 +1294,7 @@ void test_vloxseg3ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_tumu(
@@ -1309,7 +1309,7 @@ void test_vloxseg3ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_tumu(
@@ -1324,7 +1324,7 @@ void test_vloxseg3ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_tumu(
@@ -1339,7 +1339,7 @@ void test_vloxseg3ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_tumu(
@@ -1354,7 +1354,7 @@ void test_vloxseg3ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg3ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_tumu(
@@ -1384,7 +1384,7 @@ void test_vloxseg3ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_tumu(
@@ -1399,7 +1399,7 @@ void test_vloxseg3ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_tumu(
@@ -1414,7 +1414,7 @@ void test_vloxseg3ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg3ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_tumu(
@@ -1444,7 +1444,7 @@ void test_vloxseg3ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_tumu(
@@ -1459,7 +1459,7 @@ void test_vloxseg3ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_tumu(
@@ -1474,7 +1474,7 @@ void test_vloxseg3ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_tumu(
@@ -1489,7 +1489,7 @@ void test_vloxseg3ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_tumu(
@@ -1504,7 +1504,7 @@ void test_vloxseg3ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_tumu(
@@ -1519,7 +1519,7 @@ void test_vloxseg3ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_tumu(
@@ -1534,7 +1534,7 @@ void test_vloxseg3ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_tumu(
@@ -1549,7 +1549,7 @@ void test_vloxseg3ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_tumu(
@@ -1564,7 +1564,7 @@ void test_vloxseg3ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg3ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_mu(
@@ -1594,7 +1594,7 @@ void test_vloxseg3ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_mu(
@@ -1609,7 +1609,7 @@ void test_vloxseg3ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_mu(
@@ -1624,7 +1624,7 @@ void test_vloxseg3ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_mu(
@@ -1639,7 +1639,7 @@ void test_vloxseg3ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_mu(
@@ -1654,7 +1654,7 @@ void test_vloxseg3ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_mu(
@@ -1669,7 +1669,7 @@ void test_vloxseg3ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_mu(
@@ -1684,7 +1684,7 @@ void test_vloxseg3ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_mu(
@@ -1699,7 +1699,7 @@ void test_vloxseg3ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_mu(
@@ -1714,7 +1714,7 @@ void test_vloxseg3ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_mu(
@@ -1729,7 +1729,7 @@ void test_vloxseg3ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_mu(
@@ -1744,7 +1744,7 @@ void test_vloxseg3ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_mu(
@@ -1759,7 +1759,7 @@ void test_vloxseg3ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_mu(
@@ -1774,7 +1774,7 @@ void test_vloxseg3ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_mu(
@@ -1789,7 +1789,7 @@ void test_vloxseg3ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_mu(
@@ -1804,7 +1804,7 @@ void test_vloxseg3ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_mu(
@@ -1819,7 +1819,7 @@ void test_vloxseg3ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_mu(
@@ -1834,7 +1834,7 @@ void test_vloxseg3ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_mu(
@@ -1849,7 +1849,7 @@ void test_vloxseg3ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_mu(
@@ -1864,7 +1864,7 @@ void test_vloxseg3ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_mu(
@@ -1879,7 +1879,7 @@ void test_vloxseg3ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_mu(
@@ -1894,7 +1894,7 @@ void test_vloxseg3ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_mu(
@@ -1909,7 +1909,7 @@ void test_vloxseg3ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_mu(
@@ -1924,7 +1924,7 @@ void test_vloxseg3ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_mu(
@@ -1939,7 +1939,7 @@ void test_vloxseg3ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vloxseg3ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_mu(
@@ -1969,7 +1969,7 @@ void test_vloxseg3ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_mu(
@@ -1984,7 +1984,7 @@ void test_vloxseg3ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_mu(
@@ -1999,7 +1999,7 @@ void test_vloxseg3ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_mu(
@@ -2014,7 +2014,7 @@ void test_vloxseg3ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_mu(
@@ -2029,7 +2029,7 @@ void test_vloxseg3ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_mu(
@@ -2044,7 +2044,7 @@ void test_vloxseg3ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_mu(
@@ -2059,7 +2059,7 @@ void test_vloxseg3ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_mu(
@@ -2074,7 +2074,7 @@ void test_vloxseg3ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_mu(
@@ -2089,7 +2089,7 @@ void test_vloxseg3ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_mu(
@@ -2104,6 +2104,6 @@ void test_vloxseg3ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c
index 206544c3b357..63588a9cc4c0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vloxseg3ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vloxseg3ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vloxseg3ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_tu(
@@ -79,7 +79,7 @@ void test_vloxseg3ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_tu(
@@ -94,7 +94,7 @@ void test_vloxseg3ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_tu(
@@ -109,7 +109,7 @@ void test_vloxseg3ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_tu(
@@ -124,7 +124,7 @@ void test_vloxseg3ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_tu(
@@ -139,7 +139,7 @@ void test_vloxseg3ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_tu(
@@ -154,7 +154,7 @@ void test_vloxseg3ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_tu(
@@ -169,7 +169,7 @@ void test_vloxseg3ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_tu(
@@ -184,7 +184,7 @@ void test_vloxseg3ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vloxseg3ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_tu(
@@ -214,7 +214,7 @@ void test_vloxseg3ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_tu(
@@ -229,7 +229,7 @@ void test_vloxseg3ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_tu(
@@ -244,7 +244,7 @@ void test_vloxseg3ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_tu(
@@ -259,7 +259,7 @@ void test_vloxseg3ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_tu(
@@ -274,7 +274,7 @@ void test_vloxseg3ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_tu(
@@ -289,7 +289,7 @@ void test_vloxseg3ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_tu(
@@ -304,7 +304,7 @@ void test_vloxseg3ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_tu(
@@ -319,7 +319,7 @@ void test_vloxseg3ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_tu(
@@ -334,7 +334,7 @@ void test_vloxseg3ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_tu(
@@ -349,7 +349,7 @@ void test_vloxseg3ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_tu(
@@ -364,7 +364,7 @@ void test_vloxseg3ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_tu(
@@ -379,7 +379,7 @@ void test_vloxseg3ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_tu(
@@ -394,7 +394,7 @@ void test_vloxseg3ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_tu(
@@ -409,7 +409,7 @@ void test_vloxseg3ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_tu(
@@ -424,7 +424,7 @@ void test_vloxseg3ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_tu(
@@ -439,7 +439,7 @@ void test_vloxseg3ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_tu(
@@ -454,7 +454,7 @@ void test_vloxseg3ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_tu(
@@ -469,7 +469,7 @@ void test_vloxseg3ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_tu(
@@ -484,7 +484,7 @@ void test_vloxseg3ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_tu(
@@ -499,7 +499,7 @@ void test_vloxseg3ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_tu(
@@ -514,7 +514,7 @@ void test_vloxseg3ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_tu(
@@ -529,7 +529,7 @@ void test_vloxseg3ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_tu(
@@ -544,7 +544,7 @@ void test_vloxseg3ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_tu(
@@ -559,7 +559,7 @@ void test_vloxseg3ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_tum(
@@ -574,7 +574,7 @@ void test_vloxseg3ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_tum(
@@ -589,7 +589,7 @@ void test_vloxseg3ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_tum(
@@ -604,7 +604,7 @@ void test_vloxseg3ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_tum(
@@ -619,7 +619,7 @@ void test_vloxseg3ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vloxseg3ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_tum(
@@ -649,7 +649,7 @@ void test_vloxseg3ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_tum(
@@ -664,7 +664,7 @@ void test_vloxseg3ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_tum(
@@ -679,7 +679,7 @@ void test_vloxseg3ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_tum(
@@ -694,7 +694,7 @@ void test_vloxseg3ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_tum(
@@ -709,7 +709,7 @@ void test_vloxseg3ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_tum(
@@ -724,7 +724,7 @@ void test_vloxseg3ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vloxseg3ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_tum(
@@ -754,7 +754,7 @@ void test_vloxseg3ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_tum(
@@ -769,7 +769,7 @@ void test_vloxseg3ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_tum(
@@ -784,7 +784,7 @@ void test_vloxseg3ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_tum(
@@ -799,7 +799,7 @@ void test_vloxseg3ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_tum(
@@ -814,7 +814,7 @@ void test_vloxseg3ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_tum(
@@ -829,7 +829,7 @@ void test_vloxseg3ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vloxseg3ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_tum(
@@ -859,7 +859,7 @@ void test_vloxseg3ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_tum(
@@ -874,7 +874,7 @@ void test_vloxseg3ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_tum(
@@ -889,7 +889,7 @@ void test_vloxseg3ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_tum(
@@ -904,7 +904,7 @@ void test_vloxseg3ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_tum(
@@ -919,7 +919,7 @@ void test_vloxseg3ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_tum(
@@ -934,7 +934,7 @@ void test_vloxseg3ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vloxseg3ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_tum(
@@ -964,7 +964,7 @@ void test_vloxseg3ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_tum(
@@ -979,7 +979,7 @@ void test_vloxseg3ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_tum(
@@ -994,7 +994,7 @@ void test_vloxseg3ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_tum(
@@ -1009,7 +1009,7 @@ void test_vloxseg3ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_tum(
@@ -1024,7 +1024,7 @@ void test_vloxseg3ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_tum(
@@ -1039,7 +1039,7 @@ void test_vloxseg3ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg3ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_tum(
@@ -1069,7 +1069,7 @@ void test_vloxseg3ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_tum(
@@ -1084,7 +1084,7 @@ void test_vloxseg3ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_tum(
@@ -1099,7 +1099,7 @@ void test_vloxseg3ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_tum(
@@ -1114,7 +1114,7 @@ void test_vloxseg3ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_tumu(
@@ -1129,7 +1129,7 @@ void test_vloxseg3ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_tumu(
@@ -1144,7 +1144,7 @@ void test_vloxseg3ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vloxseg3ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_tumu(
@@ -1174,7 +1174,7 @@ void test_vloxseg3ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_tumu(
@@ -1189,7 +1189,7 @@ void test_vloxseg3ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_tumu(
@@ -1204,7 +1204,7 @@ void test_vloxseg3ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_tumu(
@@ -1219,7 +1219,7 @@ void test_vloxseg3ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_tumu(
@@ -1234,7 +1234,7 @@ void test_vloxseg3ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_tumu(
@@ -1249,7 +1249,7 @@ void test_vloxseg3ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_tumu(
@@ -1264,7 +1264,7 @@ void test_vloxseg3ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vloxseg3ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_tumu(
@@ -1294,7 +1294,7 @@ void test_vloxseg3ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_tumu(
@@ -1309,7 +1309,7 @@ void test_vloxseg3ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_tumu(
@@ -1324,7 +1324,7 @@ void test_vloxseg3ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_tumu(
@@ -1339,7 +1339,7 @@ void test_vloxseg3ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vloxseg3ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg3ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_tumu(
@@ -1384,7 +1384,7 @@ void test_vloxseg3ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_tumu(
@@ -1399,7 +1399,7 @@ void test_vloxseg3ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_tumu(
@@ -1414,7 +1414,7 @@ void test_vloxseg3ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg3ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_tumu(
@@ -1444,7 +1444,7 @@ void test_vloxseg3ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_tumu(
@@ -1459,7 +1459,7 @@ void test_vloxseg3ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_tumu(
@@ -1474,7 +1474,7 @@ void test_vloxseg3ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_tumu(
@@ -1489,7 +1489,7 @@ void test_vloxseg3ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_tumu(
@@ -1504,7 +1504,7 @@ void test_vloxseg3ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_tumu(
@@ -1519,7 +1519,7 @@ void test_vloxseg3ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_tumu(
@@ -1534,7 +1534,7 @@ void test_vloxseg3ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_tumu(
@@ -1549,7 +1549,7 @@ void test_vloxseg3ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vloxseg3ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg3ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_tumu(
@@ -1594,7 +1594,7 @@ void test_vloxseg3ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_tumu(
@@ -1609,7 +1609,7 @@ void test_vloxseg3ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_tumu(
@@ -1624,7 +1624,7 @@ void test_vloxseg3ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_tumu(
@@ -1639,7 +1639,7 @@ void test_vloxseg3ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_tumu(
@@ -1654,7 +1654,7 @@ void test_vloxseg3ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_tumu(
@@ -1669,7 +1669,7 @@ void test_vloxseg3ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_mu(
@@ -1684,7 +1684,7 @@ void test_vloxseg3ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_mu(
@@ -1699,7 +1699,7 @@ void test_vloxseg3ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_mu(
@@ -1714,7 +1714,7 @@ void test_vloxseg3ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_mu(
@@ -1729,7 +1729,7 @@ void test_vloxseg3ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_mu(
@@ -1744,7 +1744,7 @@ void test_vloxseg3ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_mu(
@@ -1759,7 +1759,7 @@ void test_vloxseg3ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_mu(
@@ -1774,7 +1774,7 @@ void test_vloxseg3ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_mu(
@@ -1789,7 +1789,7 @@ void test_vloxseg3ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_mu(
@@ -1804,7 +1804,7 @@ void test_vloxseg3ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_mu(
@@ -1819,7 +1819,7 @@ void test_vloxseg3ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_mu(
@@ -1834,7 +1834,7 @@ void test_vloxseg3ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_mu(
@@ -1849,7 +1849,7 @@ void test_vloxseg3ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_mu(
@@ -1864,7 +1864,7 @@ void test_vloxseg3ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_mu(
@@ -1879,7 +1879,7 @@ void test_vloxseg3ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_mu(
@@ -1894,7 +1894,7 @@ void test_vloxseg3ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_mu(
@@ -1909,7 +1909,7 @@ void test_vloxseg3ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_mu(
@@ -1924,7 +1924,7 @@ void test_vloxseg3ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_mu(
@@ -1939,7 +1939,7 @@ void test_vloxseg3ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vloxseg3ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_mu(
@@ -1969,7 +1969,7 @@ void test_vloxseg3ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_mu(
@@ -1984,7 +1984,7 @@ void test_vloxseg3ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_mu(
@@ -1999,7 +1999,7 @@ void test_vloxseg3ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_mu(
@@ -2014,7 +2014,7 @@ void test_vloxseg3ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_mu(
@@ -2029,7 +2029,7 @@ void test_vloxseg3ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_mu(
@@ -2044,7 +2044,7 @@ void test_vloxseg3ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_mu(
@@ -2059,7 +2059,7 @@ void test_vloxseg3ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_mu(
@@ -2074,7 +2074,7 @@ void test_vloxseg3ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_mu(
@@ -2089,7 +2089,7 @@ void test_vloxseg3ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg3ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_mu(
@@ -2119,7 +2119,7 @@ void test_vloxseg3ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_mu(
@@ -2134,7 +2134,7 @@ void test_vloxseg3ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_mu(
@@ -2149,7 +2149,7 @@ void test_vloxseg3ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_mu(
@@ -2164,7 +2164,7 @@ void test_vloxseg3ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_mu(
@@ -2179,7 +2179,7 @@ void test_vloxseg3ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_mu(
@@ -2194,7 +2194,7 @@ void test_vloxseg3ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_mu(
@@ -2209,7 +2209,7 @@ void test_vloxseg3ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_mu(
@@ -2224,6 +2224,6 @@ void test_vloxseg3ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg3ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vloxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c
index a0a203eeaeb8..fbf2724c15c6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vloxseg4ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vloxseg4ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vloxseg4ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_tu(
@@ -89,7 +89,7 @@ void test_vloxseg4ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_tu(
@@ -106,7 +106,7 @@ void test_vloxseg4ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_tu(
@@ -123,7 +123,7 @@ void test_vloxseg4ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_tu(
@@ -140,7 +140,7 @@ void test_vloxseg4ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_tu(
@@ -157,7 +157,7 @@ void test_vloxseg4ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_tu(
@@ -174,7 +174,7 @@ void test_vloxseg4ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_tu(
@@ -191,7 +191,7 @@ void test_vloxseg4ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_tu(
@@ -208,7 +208,7 @@ void test_vloxseg4ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_tu(
@@ -225,7 +225,7 @@ void test_vloxseg4ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_tu(
@@ -242,7 +242,7 @@ void test_vloxseg4ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_tu(
@@ -259,7 +259,7 @@ void test_vloxseg4ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_tu(
@@ -276,7 +276,7 @@ void test_vloxseg4ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_tu(
@@ -293,7 +293,7 @@ void test_vloxseg4ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_tu(
@@ -310,7 +310,7 @@ void test_vloxseg4ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_tu(
@@ -327,7 +327,7 @@ void test_vloxseg4ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_tu(
@@ -344,7 +344,7 @@ void test_vloxseg4ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_tu(
@@ -361,7 +361,7 @@ void test_vloxseg4ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_tu(
@@ -378,7 +378,7 @@ void test_vloxseg4ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_tu(
@@ -395,7 +395,7 @@ void test_vloxseg4ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_tu(
@@ -412,7 +412,7 @@ void test_vloxseg4ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_tu(
@@ -429,7 +429,7 @@ void test_vloxseg4ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_tu(
@@ -446,7 +446,7 @@ void test_vloxseg4ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_tu(
@@ -463,7 +463,7 @@ void test_vloxseg4ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_tu(
@@ -480,7 +480,7 @@ void test_vloxseg4ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_tu(
@@ -497,7 +497,7 @@ void test_vloxseg4ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_tu(
@@ -514,7 +514,7 @@ void test_vloxseg4ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_tu(
@@ -531,7 +531,7 @@ void test_vloxseg4ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_tu(
@@ -548,7 +548,7 @@ void test_vloxseg4ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_tu(
@@ -565,7 +565,7 @@ void test_vloxseg4ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_tu(
@@ -582,7 +582,7 @@ void test_vloxseg4ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_tu(
@@ -599,7 +599,7 @@ void test_vloxseg4ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_tu(
@@ -616,7 +616,7 @@ void test_vloxseg4ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_tu(
@@ -633,7 +633,7 @@ void test_vloxseg4ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_tum(
@@ -650,7 +650,7 @@ void test_vloxseg4ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_tum(
@@ -667,7 +667,7 @@ void test_vloxseg4ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_tum(
@@ -684,7 +684,7 @@ void test_vloxseg4ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_tum(
@@ -701,7 +701,7 @@ void test_vloxseg4ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_tum(
@@ -718,7 +718,7 @@ void test_vloxseg4ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_tum(
@@ -735,7 +735,7 @@ void test_vloxseg4ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_tum(
@@ -752,7 +752,7 @@ void test_vloxseg4ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_tum(
@@ -769,7 +769,7 @@ void test_vloxseg4ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_tum(
@@ -786,7 +786,7 @@ void test_vloxseg4ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_tum(
@@ -803,7 +803,7 @@ void test_vloxseg4ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_tum(
@@ -820,7 +820,7 @@ void test_vloxseg4ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_tum(
@@ -837,7 +837,7 @@ void test_vloxseg4ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_tum(
@@ -854,7 +854,7 @@ void test_vloxseg4ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_tum(
@@ -871,7 +871,7 @@ void test_vloxseg4ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_tum(
@@ -888,7 +888,7 @@ void test_vloxseg4ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_tum(
@@ -905,7 +905,7 @@ void test_vloxseg4ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_tum(
@@ -922,7 +922,7 @@ void test_vloxseg4ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_tum(
@@ -939,7 +939,7 @@ void test_vloxseg4ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_tum(
@@ -956,7 +956,7 @@ void test_vloxseg4ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_tum(
@@ -973,7 +973,7 @@ void test_vloxseg4ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_tum(
@@ -990,7 +990,7 @@ void test_vloxseg4ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_tum(
@@ -1007,7 +1007,7 @@ void test_vloxseg4ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_tum(
@@ -1024,7 +1024,7 @@ void test_vloxseg4ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_tum(
@@ -1041,7 +1041,7 @@ void test_vloxseg4ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_tum(
@@ -1058,7 +1058,7 @@ void test_vloxseg4ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_tum(
@@ -1075,7 +1075,7 @@ void test_vloxseg4ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_tum(
@@ -1092,7 +1092,7 @@ void test_vloxseg4ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_tum(
@@ -1109,7 +1109,7 @@ void test_vloxseg4ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_tum(
@@ -1126,7 +1126,7 @@ void test_vloxseg4ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_tum(
@@ -1143,7 +1143,7 @@ void test_vloxseg4ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_tum(
@@ -1160,7 +1160,7 @@ void test_vloxseg4ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_tum(
@@ -1177,7 +1177,7 @@ void test_vloxseg4ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_tum(
@@ -1194,7 +1194,7 @@ void test_vloxseg4ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_tum(
@@ -1211,7 +1211,7 @@ void test_vloxseg4ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_tum(
@@ -1228,7 +1228,7 @@ void test_vloxseg4ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_tum(
@@ -1245,7 +1245,7 @@ void test_vloxseg4ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_tum(
@@ -1262,7 +1262,7 @@ void test_vloxseg4ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vloxseg4ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_tumu(
@@ -1296,7 +1296,7 @@ void test_vloxseg4ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_tumu(
@@ -1313,7 +1313,7 @@ void test_vloxseg4ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_tumu(
@@ -1330,7 +1330,7 @@ void test_vloxseg4ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_tumu(
@@ -1347,7 +1347,7 @@ void test_vloxseg4ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_tumu(
@@ -1364,7 +1364,7 @@ void test_vloxseg4ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_tumu(
@@ -1381,7 +1381,7 @@ void test_vloxseg4ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_tumu(
@@ -1398,7 +1398,7 @@ void test_vloxseg4ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_tumu(
@@ -1415,7 +1415,7 @@ void test_vloxseg4ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_tumu(
@@ -1432,7 +1432,7 @@ void test_vloxseg4ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_tumu(
@@ -1449,7 +1449,7 @@ void test_vloxseg4ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_tumu(
@@ -1466,7 +1466,7 @@ void test_vloxseg4ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_tumu(
@@ -1483,7 +1483,7 @@ void test_vloxseg4ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_tumu(
@@ -1500,7 +1500,7 @@ void test_vloxseg4ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_tumu(
@@ -1517,7 +1517,7 @@ void test_vloxseg4ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_tumu(
@@ -1534,7 +1534,7 @@ void test_vloxseg4ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vloxseg4ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_tumu(
@@ -1568,7 +1568,7 @@ void test_vloxseg4ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_tumu(
@@ -1585,7 +1585,7 @@ void test_vloxseg4ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_tumu(
@@ -1602,7 +1602,7 @@ void test_vloxseg4ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_tumu(
@@ -1619,7 +1619,7 @@ void test_vloxseg4ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_tumu(
@@ -1636,7 +1636,7 @@ void test_vloxseg4ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_tumu(
@@ -1653,7 +1653,7 @@ void test_vloxseg4ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_tumu(
@@ -1670,7 +1670,7 @@ void test_vloxseg4ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_tumu(
@@ -1687,7 +1687,7 @@ void test_vloxseg4ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_tumu(
@@ -1704,7 +1704,7 @@ void test_vloxseg4ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_tumu(
@@ -1721,7 +1721,7 @@ void test_vloxseg4ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_tumu(
@@ -1738,7 +1738,7 @@ void test_vloxseg4ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_tumu(
@@ -1755,7 +1755,7 @@ void test_vloxseg4ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_tumu(
@@ -1772,7 +1772,7 @@ void test_vloxseg4ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_tumu(
@@ -1789,7 +1789,7 @@ void test_vloxseg4ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_tumu(
@@ -1806,7 +1806,7 @@ void test_vloxseg4ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_tumu(
@@ -1823,7 +1823,7 @@ void test_vloxseg4ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_tumu(
@@ -1840,7 +1840,7 @@ void test_vloxseg4ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_tumu(
@@ -1857,7 +1857,7 @@ void test_vloxseg4ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_tumu(
@@ -1874,7 +1874,7 @@ void test_vloxseg4ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_tumu(
@@ -1891,7 +1891,7 @@ void test_vloxseg4ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_mu(
@@ -1908,7 +1908,7 @@ void test_vloxseg4ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_mu(
@@ -1925,7 +1925,7 @@ void test_vloxseg4ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_mu(
@@ -1942,7 +1942,7 @@ void test_vloxseg4ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_mu(
@@ -1959,7 +1959,7 @@ void test_vloxseg4ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_mu(
@@ -1976,7 +1976,7 @@ void test_vloxseg4ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_mu(
@@ -1993,7 +1993,7 @@ void test_vloxseg4ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_mu(
@@ -2010,7 +2010,7 @@ void test_vloxseg4ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_mu(
@@ -2027,7 +2027,7 @@ void test_vloxseg4ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_mu(
@@ -2044,7 +2044,7 @@ void test_vloxseg4ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_mu(
@@ -2061,7 +2061,7 @@ void test_vloxseg4ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_mu(
@@ -2078,7 +2078,7 @@ void test_vloxseg4ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_mu(
@@ -2095,7 +2095,7 @@ void test_vloxseg4ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_mu(
@@ -2112,7 +2112,7 @@ void test_vloxseg4ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_mu(
@@ -2129,7 +2129,7 @@ void test_vloxseg4ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_mu(
@@ -2146,7 +2146,7 @@ void test_vloxseg4ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_mu(
@@ -2163,7 +2163,7 @@ void test_vloxseg4ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_mu(
@@ -2180,7 +2180,7 @@ void test_vloxseg4ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_mu(
@@ -2197,7 +2197,7 @@ void test_vloxseg4ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vloxseg4ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_mu(
@@ -2231,7 +2231,7 @@ void test_vloxseg4ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_mu(
@@ -2248,7 +2248,7 @@ void test_vloxseg4ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_mu(
@@ -2265,7 +2265,7 @@ void test_vloxseg4ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_mu(
@@ -2282,7 +2282,7 @@ void test_vloxseg4ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_mu(
@@ -2299,7 +2299,7 @@ void test_vloxseg4ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_mu(
@@ -2316,7 +2316,7 @@ void test_vloxseg4ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_mu(
@@ -2333,7 +2333,7 @@ void test_vloxseg4ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_mu(
@@ -2350,7 +2350,7 @@ void test_vloxseg4ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_mu(
@@ -2367,7 +2367,7 @@ void test_vloxseg4ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_mu(
@@ -2384,7 +2384,7 @@ void test_vloxseg4ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_mu(
@@ -2401,7 +2401,7 @@ void test_vloxseg4ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_mu(
@@ -2418,7 +2418,7 @@ void test_vloxseg4ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_mu(
@@ -2435,7 +2435,7 @@ void test_vloxseg4ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_mu(
@@ -2452,7 +2452,7 @@ void test_vloxseg4ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_mu(
@@ -2469,7 +2469,7 @@ void test_vloxseg4ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_mu(
@@ -2486,7 +2486,7 @@ void test_vloxseg4ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_mu(
@@ -2503,7 +2503,7 @@ void test_vloxseg4ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_mu(
@@ -2520,6 +2520,6 @@ void test_vloxseg4ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
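For reference, every hunk in these autogenerated tests applies the same mechanical rename: the overloaded intrinsic call gains the __riscv_ prefix while its argument list stays unchanged. Below is a minimal, hand-written sketch of the pattern as it appears in user code, assuming a V-extension toolchain (e.g. -march=rv64gcv); the function name add_vectors is illustrative and not taken from this patch.

#include <riscv_vector.h>

// Overloaded form: the intrinsic resolves on the operand types,
// so no type suffix (such as _vv_i32m1) is spelled at the call site.
vint32m1_t add_vectors(vint32m1_t a, vint32m1_t b, size_t vl) {
  // Before this series the overloaded call was written as: vadd(a, b, vl);
  return __riscv_vadd(a, b, vl);
}

The non-overloaded spelling of the same operation would be __riscv_vadd_vv_i32m1; the tests in this patch exercise the overloaded spellings only.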
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c
index 6f588eb48680..a537154289fd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vloxseg4ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vloxseg4ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vloxseg4ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_tu(
@@ -89,7 +89,7 @@ void test_vloxseg4ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_tu(
@@ -106,7 +106,7 @@ void test_vloxseg4ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_tu(
@@ -123,7 +123,7 @@ void test_vloxseg4ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_tu(
@@ -140,7 +140,7 @@ void test_vloxseg4ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_tu(
@@ -157,7 +157,7 @@ void test_vloxseg4ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_tu(
@@ -174,7 +174,7 @@ void test_vloxseg4ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_tu(
@@ -191,7 +191,7 @@ void test_vloxseg4ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_tu(
@@ -208,7 +208,7 @@ void test_vloxseg4ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_tu(
@@ -225,7 +225,7 @@ void test_vloxseg4ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_tu(
@@ -242,7 +242,7 @@ void test_vloxseg4ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_tu(
@@ -259,7 +259,7 @@ void test_vloxseg4ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_tu(
@@ -276,7 +276,7 @@ void test_vloxseg4ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_tu(
@@ -293,7 +293,7 @@ void test_vloxseg4ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_tu(
@@ -310,7 +310,7 @@ void test_vloxseg4ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_tu(
@@ -327,7 +327,7 @@ void test_vloxseg4ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_tu(
@@ -344,7 +344,7 @@ void test_vloxseg4ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_tu(
@@ -361,7 +361,7 @@ void test_vloxseg4ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_tu(
@@ -378,7 +378,7 @@ void test_vloxseg4ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_tu(
@@ -395,7 +395,7 @@ void test_vloxseg4ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_tu(
@@ -412,7 +412,7 @@ void test_vloxseg4ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_tu(
@@ -429,7 +429,7 @@ void test_vloxseg4ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_tu(
@@ -446,7 +446,7 @@ void test_vloxseg4ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_tu(
@@ -463,7 +463,7 @@ void test_vloxseg4ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_tu(
@@ -480,7 +480,7 @@ void test_vloxseg4ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_tu(
@@ -497,7 +497,7 @@ void test_vloxseg4ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_tu(
@@ -514,7 +514,7 @@ void test_vloxseg4ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_tu(
@@ -531,7 +531,7 @@ void test_vloxseg4ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_tu(
@@ -548,7 +548,7 @@ void test_vloxseg4ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_tu(
@@ -565,7 +565,7 @@ void test_vloxseg4ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_tu(
@@ -582,7 +582,7 @@ void test_vloxseg4ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_tu(
@@ -599,7 +599,7 @@ void test_vloxseg4ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_tu(
@@ -616,7 +616,7 @@ void test_vloxseg4ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_tu(
@@ -633,7 +633,7 @@ void test_vloxseg4ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_tum(
@@ -650,7 +650,7 @@ void test_vloxseg4ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_tum(
@@ -667,7 +667,7 @@ void test_vloxseg4ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_tum(
@@ -684,7 +684,7 @@ void test_vloxseg4ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_tum(
@@ -701,7 +701,7 @@ void test_vloxseg4ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_tum(
@@ -718,7 +718,7 @@ void test_vloxseg4ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_tum(
@@ -735,7 +735,7 @@ void test_vloxseg4ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_tum(
@@ -752,7 +752,7 @@ void test_vloxseg4ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_tum(
@@ -769,7 +769,7 @@ void test_vloxseg4ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_tum(
@@ -786,7 +786,7 @@ void test_vloxseg4ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_tum(
@@ -803,7 +803,7 @@ void test_vloxseg4ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_tum(
@@ -820,7 +820,7 @@ void test_vloxseg4ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_tum(
@@ -837,7 +837,7 @@ void test_vloxseg4ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_tum(
@@ -854,7 +854,7 @@ void test_vloxseg4ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_tum(
@@ -871,7 +871,7 @@ void test_vloxseg4ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_tum(
@@ -888,7 +888,7 @@ void test_vloxseg4ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_tum(
@@ -905,7 +905,7 @@ void test_vloxseg4ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_tum(
@@ -922,7 +922,7 @@ void test_vloxseg4ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_tum(
@@ -939,7 +939,7 @@ void test_vloxseg4ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_tum(
@@ -956,7 +956,7 @@ void test_vloxseg4ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_tum(
@@ -973,7 +973,7 @@ void test_vloxseg4ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_tum(
@@ -990,7 +990,7 @@ void test_vloxseg4ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_tum(
@@ -1007,7 +1007,7 @@ void test_vloxseg4ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_tum(
@@ -1024,7 +1024,7 @@ void test_vloxseg4ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_tum(
@@ -1041,7 +1041,7 @@ void test_vloxseg4ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_tum(
@@ -1058,7 +1058,7 @@ void test_vloxseg4ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_tum(
@@ -1075,7 +1075,7 @@ void test_vloxseg4ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_tum(
@@ -1092,7 +1092,7 @@ void test_vloxseg4ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_tum(
@@ -1109,7 +1109,7 @@ void test_vloxseg4ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_tum(
@@ -1126,7 +1126,7 @@ void test_vloxseg4ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_tum(
@@ -1143,7 +1143,7 @@ void test_vloxseg4ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_tum(
@@ -1160,7 +1160,7 @@ void test_vloxseg4ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_tum(
@@ -1177,7 +1177,7 @@ void test_vloxseg4ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_tum(
@@ -1194,7 +1194,7 @@ void test_vloxseg4ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_tum(
@@ -1211,7 +1211,7 @@ void test_vloxseg4ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_tum(
@@ -1228,7 +1228,7 @@ void test_vloxseg4ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_tum(
@@ -1245,7 +1245,7 @@ void test_vloxseg4ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_tum(
@@ -1262,7 +1262,7 @@ void test_vloxseg4ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vloxseg4ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_tumu(
@@ -1296,7 +1296,7 @@ void test_vloxseg4ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_tumu(
@@ -1313,7 +1313,7 @@ void test_vloxseg4ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_tumu(
@@ -1330,7 +1330,7 @@ void test_vloxseg4ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_tumu(
@@ -1347,7 +1347,7 @@ void test_vloxseg4ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_tumu(
@@ -1364,7 +1364,7 @@ void test_vloxseg4ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_tumu(
@@ -1381,7 +1381,7 @@ void test_vloxseg4ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_tumu(
@@ -1398,7 +1398,7 @@ void test_vloxseg4ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_tumu(
@@ -1415,7 +1415,7 @@ void test_vloxseg4ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_tumu(
@@ -1432,7 +1432,7 @@ void test_vloxseg4ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_tumu(
@@ -1449,7 +1449,7 @@ void test_vloxseg4ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_tumu(
@@ -1466,7 +1466,7 @@ void test_vloxseg4ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_tumu(
@@ -1483,7 +1483,7 @@ void test_vloxseg4ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_tumu(
@@ -1500,7 +1500,7 @@ void test_vloxseg4ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_tumu(
@@ -1517,7 +1517,7 @@ void test_vloxseg4ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_tumu(
@@ -1534,7 +1534,7 @@ void test_vloxseg4ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vloxseg4ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_tumu(
@@ -1568,7 +1568,7 @@ void test_vloxseg4ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_tumu(
@@ -1585,7 +1585,7 @@ void test_vloxseg4ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_tumu(
@@ -1602,7 +1602,7 @@ void test_vloxseg4ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_tumu(
@@ -1619,7 +1619,7 @@ void test_vloxseg4ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_tumu(
@@ -1636,7 +1636,7 @@ void test_vloxseg4ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_tumu(
@@ -1653,7 +1653,7 @@ void test_vloxseg4ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_tumu(
@@ -1670,7 +1670,7 @@ void test_vloxseg4ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_tumu(
@@ -1687,7 +1687,7 @@ void test_vloxseg4ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_tumu(
@@ -1704,7 +1704,7 @@ void test_vloxseg4ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_tumu(
@@ -1721,7 +1721,7 @@ void test_vloxseg4ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_tumu(
@@ -1738,7 +1738,7 @@ void test_vloxseg4ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_tumu(
@@ -1755,7 +1755,7 @@ void test_vloxseg4ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_tumu(
@@ -1772,7 +1772,7 @@ void test_vloxseg4ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_tumu(
@@ -1789,7 +1789,7 @@ void test_vloxseg4ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_tumu(
@@ -1806,7 +1806,7 @@ void test_vloxseg4ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_tumu(
@@ -1823,7 +1823,7 @@ void test_vloxseg4ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_tumu(
@@ -1840,7 +1840,7 @@ void test_vloxseg4ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_tumu(
@@ -1857,7 +1857,7 @@ void test_vloxseg4ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_tumu(
@@ -1874,7 +1874,7 @@ void test_vloxseg4ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_tumu(
@@ -1891,7 +1891,7 @@ void test_vloxseg4ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_mu(
@@ -1908,7 +1908,7 @@ void test_vloxseg4ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_mu(
@@ -1925,7 +1925,7 @@ void test_vloxseg4ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_mu(
@@ -1942,7 +1942,7 @@ void test_vloxseg4ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_mu(
@@ -1959,7 +1959,7 @@ void test_vloxseg4ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_mu(
@@ -1976,7 +1976,7 @@ void test_vloxseg4ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_mu(
@@ -1993,7 +1993,7 @@ void test_vloxseg4ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_mu(
@@ -2010,7 +2010,7 @@ void test_vloxseg4ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_mu(
@@ -2027,7 +2027,7 @@ void test_vloxseg4ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_mu(
@@ -2044,7 +2044,7 @@ void test_vloxseg4ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_mu(
@@ -2061,7 +2061,7 @@ void test_vloxseg4ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_mu(
@@ -2078,7 +2078,7 @@ void test_vloxseg4ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_mu(
@@ -2095,7 +2095,7 @@ void test_vloxseg4ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_mu(
@@ -2112,7 +2112,7 @@ void test_vloxseg4ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_mu(
@@ -2129,7 +2129,7 @@ void test_vloxseg4ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_mu(
@@ -2146,7 +2146,7 @@ void test_vloxseg4ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_mu(
@@ -2163,7 +2163,7 @@ void test_vloxseg4ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_mu(
@@ -2180,7 +2180,7 @@ void test_vloxseg4ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_mu(
@@ -2197,7 +2197,7 @@ void test_vloxseg4ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vloxseg4ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_mu(
@@ -2231,7 +2231,7 @@ void test_vloxseg4ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_mu(
@@ -2248,7 +2248,7 @@ void test_vloxseg4ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_mu(
@@ -2265,7 +2265,7 @@ void test_vloxseg4ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_mu(
@@ -2282,7 +2282,7 @@ void test_vloxseg4ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_mu(
@@ -2299,7 +2299,7 @@ void test_vloxseg4ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_mu(
@@ -2316,7 +2316,7 @@ void test_vloxseg4ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_mu(
@@ -2333,7 +2333,7 @@ void test_vloxseg4ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_mu(
@@ -2350,7 +2350,7 @@ void test_vloxseg4ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_mu(
@@ -2367,7 +2367,7 @@ void test_vloxseg4ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_mu(
@@ -2384,7 +2384,7 @@ void test_vloxseg4ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_mu(
@@ -2401,7 +2401,7 @@ void test_vloxseg4ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_mu(
@@ -2418,7 +2418,7 @@ void test_vloxseg4ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_mu(
@@ -2435,7 +2435,7 @@ void test_vloxseg4ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_mu(
@@ -2452,7 +2452,7 @@ void test_vloxseg4ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_mu(
@@ -2469,7 +2469,7 @@ void test_vloxseg4ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_mu(
@@ -2486,7 +2486,7 @@ void test_vloxseg4ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_mu(
@@ -2503,7 +2503,7 @@ void test_vloxseg4ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_mu(
@@ -2520,6 +2520,6 @@ void test_vloxseg4ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
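
The hunks in this file are purely mechanical: every overloaded call site in the autogenerated tests gains the `__riscv_` prefix, while the test function names, parameter lists, and CHECK lines are left untouched. As a minimal, hypothetical user-side sketch of what the renamed overloaded API looks like after this change (not part of this patch; it assumes a compiler that already ships the prefixed intrinsics and an rv64gcv or equivalent V-extension target):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative strip-mined array add, assuming the prefixed intrinsics
 * are available. The loop itself is an example, not taken from the tests. */
void add_i32(int32_t *dst, const int32_t *a, const int32_t *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    /* Plain loads stay non-overloaded: the element type cannot be
     * inferred from the arguments, so the full name is spelled out. */
    vint32m1_t va = __riscv_vle32_v_i32m1(a + i, vl);
    vint32m1_t vb = __riscv_vle32_v_i32m1(b + i, vl);
    /* Overloaded form, now reached through the __riscv_ prefix exactly
     * as in the call sites updated in the hunks above. */
    vint32m1_t vc = __riscv_vadd(va, vb, vl);
    /* Stores overload on the value operand, so the short name works. */
    __riscv_vse32(dst + i, vc, vl);
    i += vl;
  }
}
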
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c
index d9957492df73..b56b22794883 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vloxseg4ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vloxseg4ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vloxseg4ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_tu(
@@ -89,7 +89,7 @@ void test_vloxseg4ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_tu(
@@ -106,7 +106,7 @@ void test_vloxseg4ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_tu(
@@ -123,7 +123,7 @@ void test_vloxseg4ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_tu(
@@ -140,7 +140,7 @@ void test_vloxseg4ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_tu(
@@ -157,7 +157,7 @@ void test_vloxseg4ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_tu(
@@ -174,7 +174,7 @@ void test_vloxseg4ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_tu(
@@ -191,7 +191,7 @@ void test_vloxseg4ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_tu(
@@ -208,7 +208,7 @@ void test_vloxseg4ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_tu(
@@ -225,7 +225,7 @@ void test_vloxseg4ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_tu(
@@ -242,7 +242,7 @@ void test_vloxseg4ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_tu(
@@ -259,7 +259,7 @@ void test_vloxseg4ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_tu(
@@ -276,7 +276,7 @@ void test_vloxseg4ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_tu(
@@ -293,7 +293,7 @@ void test_vloxseg4ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_tu(
@@ -310,7 +310,7 @@ void test_vloxseg4ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_tu(
@@ -327,7 +327,7 @@ void test_vloxseg4ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_tu(
@@ -344,7 +344,7 @@ void test_vloxseg4ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_tu(
@@ -361,7 +361,7 @@ void test_vloxseg4ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_tu(
@@ -378,7 +378,7 @@ void test_vloxseg4ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vloxseg4ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_tu(
@@ -412,7 +412,7 @@ void test_vloxseg4ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_tu(
@@ -429,7 +429,7 @@ void test_vloxseg4ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_tu(
@@ -446,7 +446,7 @@ void test_vloxseg4ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_tu(
@@ -463,7 +463,7 @@ void test_vloxseg4ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_tu(
@@ -480,7 +480,7 @@ void test_vloxseg4ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_tu(
@@ -497,7 +497,7 @@ void test_vloxseg4ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_tu(
@@ -514,7 +514,7 @@ void test_vloxseg4ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_tu(
@@ -531,7 +531,7 @@ void test_vloxseg4ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_tu(
@@ -548,7 +548,7 @@ void test_vloxseg4ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_tu(
@@ -565,7 +565,7 @@ void test_vloxseg4ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_tu(
@@ -582,7 +582,7 @@ void test_vloxseg4ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_tu(
@@ -599,7 +599,7 @@ void test_vloxseg4ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_tum(
@@ -616,7 +616,7 @@ void test_vloxseg4ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_tum(
@@ -633,7 +633,7 @@ void test_vloxseg4ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_tum(
@@ -650,7 +650,7 @@ void test_vloxseg4ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_tum(
@@ -667,7 +667,7 @@ void test_vloxseg4ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_tum(
@@ -684,7 +684,7 @@ void test_vloxseg4ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_tum(
@@ -701,7 +701,7 @@ void test_vloxseg4ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_tum(
@@ -718,7 +718,7 @@ void test_vloxseg4ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_tum(
@@ -735,7 +735,7 @@ void test_vloxseg4ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_tum(
@@ -752,7 +752,7 @@ void test_vloxseg4ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_tum(
@@ -769,7 +769,7 @@ void test_vloxseg4ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vloxseg4ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_tum(
@@ -803,7 +803,7 @@ void test_vloxseg4ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_tum(
@@ -820,7 +820,7 @@ void test_vloxseg4ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_tum(
@@ -837,7 +837,7 @@ void test_vloxseg4ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_tum(
@@ -854,7 +854,7 @@ void test_vloxseg4ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_tum(
@@ -871,7 +871,7 @@ void test_vloxseg4ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_tum(
@@ -888,7 +888,7 @@ void test_vloxseg4ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_tum(
@@ -905,7 +905,7 @@ void test_vloxseg4ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_tum(
@@ -922,7 +922,7 @@ void test_vloxseg4ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_tum(
@@ -939,7 +939,7 @@ void test_vloxseg4ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_tum(
@@ -956,7 +956,7 @@ void test_vloxseg4ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_tum(
@@ -973,7 +973,7 @@ void test_vloxseg4ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_tum(
@@ -990,7 +990,7 @@ void test_vloxseg4ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_tum(
@@ -1007,7 +1007,7 @@ void test_vloxseg4ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_tum(
@@ -1024,7 +1024,7 @@ void test_vloxseg4ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_tum(
@@ -1041,7 +1041,7 @@ void test_vloxseg4ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_tum(
@@ -1058,7 +1058,7 @@ void test_vloxseg4ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_tum(
@@ -1075,7 +1075,7 @@ void test_vloxseg4ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_tum(
@@ -1092,7 +1092,7 @@ void test_vloxseg4ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_tum(
@@ -1109,7 +1109,7 @@ void test_vloxseg4ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_tum(
@@ -1126,7 +1126,7 @@ void test_vloxseg4ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_tum(
@@ -1143,7 +1143,7 @@ void test_vloxseg4ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_tum(
@@ -1160,7 +1160,7 @@ void test_vloxseg4ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_tum(
@@ -1177,7 +1177,7 @@ void test_vloxseg4ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_tum(
@@ -1194,7 +1194,7 @@ void test_vloxseg4ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_tumu(
@@ -1211,7 +1211,7 @@ void test_vloxseg4ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_tumu(
@@ -1228,7 +1228,7 @@ void test_vloxseg4ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_tumu(
@@ -1245,7 +1245,7 @@ void test_vloxseg4ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_tumu(
@@ -1262,7 +1262,7 @@ void test_vloxseg4ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_tumu(
@@ -1279,7 +1279,7 @@ void test_vloxseg4ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vloxseg4ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_tumu(
@@ -1313,7 +1313,7 @@ void test_vloxseg4ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_tumu(
@@ -1330,7 +1330,7 @@ void test_vloxseg4ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_tumu(
@@ -1347,7 +1347,7 @@ void test_vloxseg4ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_tumu(
@@ -1364,7 +1364,7 @@ void test_vloxseg4ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_tumu(
@@ -1381,7 +1381,7 @@ void test_vloxseg4ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_tumu(
@@ -1398,7 +1398,7 @@ void test_vloxseg4ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_tumu(
@@ -1415,7 +1415,7 @@ void test_vloxseg4ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_tumu(
@@ -1432,7 +1432,7 @@ void test_vloxseg4ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_tumu(
@@ -1449,7 +1449,7 @@ void test_vloxseg4ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_tumu(
@@ -1466,7 +1466,7 @@ void test_vloxseg4ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_tumu(
@@ -1483,7 +1483,7 @@ void test_vloxseg4ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_tumu(
@@ -1500,7 +1500,7 @@ void test_vloxseg4ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_tumu(
@@ -1517,7 +1517,7 @@ void test_vloxseg4ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_tumu(
@@ -1534,7 +1534,7 @@ void test_vloxseg4ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vloxseg4ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_tumu(
@@ -1568,7 +1568,7 @@ void test_vloxseg4ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_tumu(
@@ -1585,7 +1585,7 @@ void test_vloxseg4ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_tumu(
@@ -1602,7 +1602,7 @@ void test_vloxseg4ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_tumu(
@@ -1619,7 +1619,7 @@ void test_vloxseg4ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_tumu(
@@ -1636,7 +1636,7 @@ void test_vloxseg4ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_tumu(
@@ -1653,7 +1653,7 @@ void test_vloxseg4ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_tumu(
@@ -1670,7 +1670,7 @@ void test_vloxseg4ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_tumu(
@@ -1687,7 +1687,7 @@ void test_vloxseg4ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_tumu(
@@ -1704,7 +1704,7 @@ void test_vloxseg4ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_tumu(
@@ -1721,7 +1721,7 @@ void test_vloxseg4ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_tumu(
@@ -1738,7 +1738,7 @@ void test_vloxseg4ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_tumu(
@@ -1755,7 +1755,7 @@ void test_vloxseg4ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_tumu(
@@ -1772,7 +1772,7 @@ void test_vloxseg4ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_tumu(
@@ -1789,7 +1789,7 @@ void test_vloxseg4ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_mu(
@@ -1806,7 +1806,7 @@ void test_vloxseg4ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_mu(
@@ -1823,7 +1823,7 @@ void test_vloxseg4ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_mu(
@@ -1840,7 +1840,7 @@ void test_vloxseg4ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_mu(
@@ -1857,7 +1857,7 @@ void test_vloxseg4ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_mu(
@@ -1874,7 +1874,7 @@ void test_vloxseg4ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_mu(
@@ -1891,7 +1891,7 @@ void test_vloxseg4ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_mu(
@@ -1908,7 +1908,7 @@ void test_vloxseg4ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_mu(
@@ -1925,7 +1925,7 @@ void test_vloxseg4ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_mu(
@@ -1942,7 +1942,7 @@ void test_vloxseg4ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vloxseg4ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_mu(
@@ -1976,7 +1976,7 @@ void test_vloxseg4ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_mu(
@@ -1993,7 +1993,7 @@ void test_vloxseg4ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_mu(
@@ -2010,7 +2010,7 @@ void test_vloxseg4ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_mu(
@@ -2027,7 +2027,7 @@ void test_vloxseg4ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_mu(
@@ -2044,7 +2044,7 @@ void test_vloxseg4ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_mu(
@@ -2061,7 +2061,7 @@ void test_vloxseg4ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_mu(
@@ -2078,7 +2078,7 @@ void test_vloxseg4ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_mu(
@@ -2095,7 +2095,7 @@ void test_vloxseg4ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_mu(
@@ -2112,7 +2112,7 @@ void test_vloxseg4ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_mu(
@@ -2129,7 +2129,7 @@ void test_vloxseg4ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_mu(
@@ -2146,7 +2146,7 @@ void test_vloxseg4ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_mu(
@@ -2163,7 +2163,7 @@ void test_vloxseg4ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_mu(
@@ -2180,7 +2180,7 @@ void test_vloxseg4ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_mu(
@@ -2197,7 +2197,7 @@ void test_vloxseg4ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vloxseg4ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_mu(
@@ -2231,7 +2231,7 @@ void test_vloxseg4ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_mu(
@@ -2248,7 +2248,7 @@ void test_vloxseg4ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_mu(
@@ -2265,7 +2265,7 @@ void test_vloxseg4ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_mu(
@@ -2282,7 +2282,7 @@ void test_vloxseg4ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_mu(
@@ -2299,7 +2299,7 @@ void test_vloxseg4ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_mu(
@@ -2316,7 +2316,7 @@ void test_vloxseg4ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_mu(
@@ -2333,7 +2333,7 @@ void test_vloxseg4ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_mu(
@@ -2350,7 +2350,7 @@ void test_vloxseg4ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_mu(
@@ -2367,7 +2367,7 @@ void test_vloxseg4ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_mu(
@@ -2384,6 +2384,6 @@ void test_vloxseg4ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
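
The pattern in every hunk above is identical: the overloaded RVV intrinsic call gains the `__riscv_` prefix while its argument list is left untouched. As a minimal sketch of what the same migration looks like in ordinary user code (the wrapper function, variable names, and the choice of `vadd` here are illustrative, not part of this test suite):

#include <riscv_vector.h>
#include <stddef.h>

// Before this change, the overloaded form was spelled vadd(a, b, vl).
// After it, the same overload carries the __riscv_ prefix; types and
// argument order are unchanged:
vint32m1_t add_vectors(vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vadd(a, b, vl);  // overloaded vector-vector integer add
}
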
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c
index 1e7bd14d8407..49fe71424327 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vloxseg4ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vloxseg4ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vloxseg4ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_tu(
@@ -89,7 +89,7 @@ void test_vloxseg4ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_tu(
@@ -106,7 +106,7 @@ void test_vloxseg4ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_tu(
@@ -123,7 +123,7 @@ void test_vloxseg4ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_tu(
@@ -140,7 +140,7 @@ void test_vloxseg4ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_tu(
@@ -157,7 +157,7 @@ void test_vloxseg4ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_tu(
@@ -174,7 +174,7 @@ void test_vloxseg4ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_tu(
@@ -191,7 +191,7 @@ void test_vloxseg4ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_tu(
@@ -208,7 +208,7 @@ void test_vloxseg4ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_tu(
@@ -225,7 +225,7 @@ void test_vloxseg4ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_tu(
@@ -242,7 +242,7 @@ void test_vloxseg4ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_tu(
@@ -259,7 +259,7 @@ void test_vloxseg4ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_tu(
@@ -276,7 +276,7 @@ void test_vloxseg4ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_tu(
@@ -293,7 +293,7 @@ void test_vloxseg4ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_tu(
@@ -310,7 +310,7 @@ void test_vloxseg4ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_tu(
@@ -327,7 +327,7 @@ void test_vloxseg4ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_tu(
@@ -344,7 +344,7 @@ void test_vloxseg4ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_tu(
@@ -361,7 +361,7 @@ void test_vloxseg4ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_tu(
@@ -378,7 +378,7 @@ void test_vloxseg4ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_tu(
@@ -395,7 +395,7 @@ void test_vloxseg4ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_tu(
@@ -412,7 +412,7 @@ void test_vloxseg4ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_tu(
@@ -429,7 +429,7 @@ void test_vloxseg4ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_tu(
@@ -446,7 +446,7 @@ void test_vloxseg4ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_tu(
@@ -463,7 +463,7 @@ void test_vloxseg4ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_tu(
@@ -480,7 +480,7 @@ void test_vloxseg4ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_tu(
@@ -497,7 +497,7 @@ void test_vloxseg4ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_tu(
@@ -514,7 +514,7 @@ void test_vloxseg4ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_tu(
@@ -531,7 +531,7 @@ void test_vloxseg4ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_tu(
@@ -548,7 +548,7 @@ void test_vloxseg4ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_tu(
@@ -565,7 +565,7 @@ void test_vloxseg4ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_tu(
@@ -582,7 +582,7 @@ void test_vloxseg4ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_tu(
@@ -599,7 +599,7 @@ void test_vloxseg4ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_tu(
@@ -616,7 +616,7 @@ void test_vloxseg4ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_tu(
@@ -633,7 +633,7 @@ void test_vloxseg4ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_tum(
@@ -650,7 +650,7 @@ void test_vloxseg4ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_tum(
@@ -667,7 +667,7 @@ void test_vloxseg4ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_tum(
@@ -684,7 +684,7 @@ void test_vloxseg4ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_tum(
@@ -701,7 +701,7 @@ void test_vloxseg4ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_tum(
@@ -718,7 +718,7 @@ void test_vloxseg4ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_tum(
@@ -735,7 +735,7 @@ void test_vloxseg4ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_tum(
@@ -752,7 +752,7 @@ void test_vloxseg4ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_tum(
@@ -769,7 +769,7 @@ void test_vloxseg4ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_tum(
@@ -786,7 +786,7 @@ void test_vloxseg4ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_tum(
@@ -803,7 +803,7 @@ void test_vloxseg4ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_tum(
@@ -820,7 +820,7 @@ void test_vloxseg4ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_tum(
@@ -837,7 +837,7 @@ void test_vloxseg4ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_tum(
@@ -854,7 +854,7 @@ void test_vloxseg4ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_tum(
@@ -871,7 +871,7 @@ void test_vloxseg4ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_tum(
@@ -888,7 +888,7 @@ void test_vloxseg4ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_tum(
@@ -905,7 +905,7 @@ void test_vloxseg4ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_tum(
@@ -922,7 +922,7 @@ void test_vloxseg4ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_tum(
@@ -939,7 +939,7 @@ void test_vloxseg4ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_tum(
@@ -956,7 +956,7 @@ void test_vloxseg4ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_tum(
@@ -973,7 +973,7 @@ void test_vloxseg4ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_tum(
@@ -990,7 +990,7 @@ void test_vloxseg4ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_tum(
@@ -1007,7 +1007,7 @@ void test_vloxseg4ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_tum(
@@ -1024,7 +1024,7 @@ void test_vloxseg4ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_tum(
@@ -1041,7 +1041,7 @@ void test_vloxseg4ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_tum(
@@ -1058,7 +1058,7 @@ void test_vloxseg4ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_tum(
@@ -1075,7 +1075,7 @@ void test_vloxseg4ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_tum(
@@ -1092,7 +1092,7 @@ void test_vloxseg4ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_tum(
@@ -1109,7 +1109,7 @@ void test_vloxseg4ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_tum(
@@ -1126,7 +1126,7 @@ void test_vloxseg4ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_tum(
@@ -1143,7 +1143,7 @@ void test_vloxseg4ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_tum(
@@ -1160,7 +1160,7 @@ void test_vloxseg4ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_tum(
@@ -1177,7 +1177,7 @@ void test_vloxseg4ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_tum(
@@ -1194,7 +1194,7 @@ void test_vloxseg4ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_tum(
@@ -1211,7 +1211,7 @@ void test_vloxseg4ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_tum(
@@ -1228,7 +1228,7 @@ void test_vloxseg4ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_tum(
@@ -1245,7 +1245,7 @@ void test_vloxseg4ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_tum(
@@ -1262,7 +1262,7 @@ void test_vloxseg4ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vloxseg4ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_tumu(
@@ -1296,7 +1296,7 @@ void test_vloxseg4ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_tumu(
@@ -1313,7 +1313,7 @@ void test_vloxseg4ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_tumu(
@@ -1330,7 +1330,7 @@ void test_vloxseg4ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_tumu(
@@ -1347,7 +1347,7 @@ void test_vloxseg4ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_tumu(
@@ -1364,7 +1364,7 @@ void test_vloxseg4ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_tumu(
@@ -1381,7 +1381,7 @@ void test_vloxseg4ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_tumu(
@@ -1398,7 +1398,7 @@ void test_vloxseg4ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_tumu(
@@ -1415,7 +1415,7 @@ void test_vloxseg4ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_tumu(
@@ -1432,7 +1432,7 @@ void test_vloxseg4ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_tumu(
@@ -1449,7 +1449,7 @@ void test_vloxseg4ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_tumu(
@@ -1466,7 +1466,7 @@ void test_vloxseg4ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_tumu(
@@ -1483,7 +1483,7 @@ void test_vloxseg4ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_tumu(
@@ -1500,7 +1500,7 @@ void test_vloxseg4ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_tumu(
@@ -1517,7 +1517,7 @@ void test_vloxseg4ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_tumu(
@@ -1534,7 +1534,7 @@ void test_vloxseg4ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vloxseg4ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_tumu(
@@ -1568,7 +1568,7 @@ void test_vloxseg4ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_tumu(
@@ -1585,7 +1585,7 @@ void test_vloxseg4ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_tumu(
@@ -1602,7 +1602,7 @@ void test_vloxseg4ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_tumu(
@@ -1619,7 +1619,7 @@ void test_vloxseg4ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_tumu(
@@ -1636,7 +1636,7 @@ void test_vloxseg4ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_tumu(
@@ -1653,7 +1653,7 @@ void test_vloxseg4ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_tumu(
@@ -1670,7 +1670,7 @@ void test_vloxseg4ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_tumu(
@@ -1687,7 +1687,7 @@ void test_vloxseg4ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_tumu(
@@ -1704,7 +1704,7 @@ void test_vloxseg4ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_tumu(
@@ -1721,7 +1721,7 @@ void test_vloxseg4ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_tumu(
@@ -1738,7 +1738,7 @@ void test_vloxseg4ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_tumu(
@@ -1755,7 +1755,7 @@ void test_vloxseg4ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_tumu(
@@ -1772,7 +1772,7 @@ void test_vloxseg4ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_tumu(
@@ -1789,7 +1789,7 @@ void test_vloxseg4ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_tumu(
@@ -1806,7 +1806,7 @@ void test_vloxseg4ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_tumu(
@@ -1823,7 +1823,7 @@ void test_vloxseg4ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_tumu(
@@ -1840,7 +1840,7 @@ void test_vloxseg4ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_tumu(
@@ -1857,7 +1857,7 @@ void test_vloxseg4ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_tumu(
@@ -1874,7 +1874,7 @@ void test_vloxseg4ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_tumu(
@@ -1891,7 +1891,7 @@ void test_vloxseg4ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_mu(
@@ -1908,7 +1908,7 @@ void test_vloxseg4ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_mu(
@@ -1925,7 +1925,7 @@ void test_vloxseg4ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_mu(
@@ -1942,7 +1942,7 @@ void test_vloxseg4ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_mu(
@@ -1959,7 +1959,7 @@ void test_vloxseg4ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_mu(
@@ -1976,7 +1976,7 @@ void test_vloxseg4ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_mu(
@@ -1993,7 +1993,7 @@ void test_vloxseg4ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_mu(
@@ -2010,7 +2010,7 @@ void test_vloxseg4ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_mu(
@@ -2027,7 +2027,7 @@ void test_vloxseg4ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_mu(
@@ -2044,7 +2044,7 @@ void test_vloxseg4ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_mu(
@@ -2061,7 +2061,7 @@ void test_vloxseg4ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_mu(
@@ -2078,7 +2078,7 @@ void test_vloxseg4ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_mu(
@@ -2095,7 +2095,7 @@ void test_vloxseg4ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_mu(
@@ -2112,7 +2112,7 @@ void test_vloxseg4ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_mu(
@@ -2129,7 +2129,7 @@ void test_vloxseg4ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_mu(
@@ -2146,7 +2146,7 @@ void test_vloxseg4ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_mu(
@@ -2163,7 +2163,7 @@ void test_vloxseg4ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_mu(
@@ -2180,7 +2180,7 @@ void test_vloxseg4ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_mu(
@@ -2197,7 +2197,7 @@ void test_vloxseg4ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vloxseg4ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_mu(
@@ -2231,7 +2231,7 @@ void test_vloxseg4ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_mu(
@@ -2248,7 +2248,7 @@ void test_vloxseg4ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_mu(
@@ -2265,7 +2265,7 @@ void test_vloxseg4ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_mu(
@@ -2282,7 +2282,7 @@ void test_vloxseg4ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_mu(
@@ -2299,7 +2299,7 @@ void test_vloxseg4ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_mu(
@@ -2316,7 +2316,7 @@ void test_vloxseg4ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_mu(
@@ -2333,7 +2333,7 @@ void test_vloxseg4ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_mu(
@@ -2350,7 +2350,7 @@ void test_vloxseg4ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_mu(
@@ -2367,7 +2367,7 @@ void test_vloxseg4ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_mu(
@@ -2384,7 +2384,7 @@ void test_vloxseg4ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_mu(
@@ -2401,7 +2401,7 @@ void test_vloxseg4ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_mu(
@@ -2418,7 +2418,7 @@ void test_vloxseg4ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_mu(
@@ -2435,7 +2435,7 @@ void test_vloxseg4ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_mu(
@@ -2452,7 +2452,7 @@ void test_vloxseg4ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_mu(
@@ -2469,7 +2469,7 @@ void test_vloxseg4ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_mu(
@@ -2486,7 +2486,7 @@ void test_vloxseg4ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_mu(
@@ -2503,7 +2503,7 @@ void test_vloxseg4ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_mu(
@@ -2520,6 +2520,6 @@ void test_vloxseg4ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg4ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vloxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c
index 2d0173b04637..a2dce46944fb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vloxseg5ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vloxseg5ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_tu(
@@ -80,7 +80,7 @@ void test_vloxseg5ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_tu(
@@ -99,7 +99,7 @@ void test_vloxseg5ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_tu(
@@ -118,7 +118,7 @@ void test_vloxseg5ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_tu(
@@ -137,7 +137,7 @@ void test_vloxseg5ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_tu(
@@ -156,7 +156,7 @@ void test_vloxseg5ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_tu(
@@ -175,7 +175,7 @@ void test_vloxseg5ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_tu(
@@ -194,7 +194,7 @@ void test_vloxseg5ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_tu(
@@ -213,7 +213,7 @@ void test_vloxseg5ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_tu(
@@ -232,7 +232,7 @@ void test_vloxseg5ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_tu(
@@ -251,7 +251,7 @@ void test_vloxseg5ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_tu(
@@ -270,7 +270,7 @@ void test_vloxseg5ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vloxseg5ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_tu(
@@ -308,7 +308,7 @@ void test_vloxseg5ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_tu(
@@ -327,7 +327,7 @@ void test_vloxseg5ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_tu(
@@ -346,7 +346,7 @@ void test_vloxseg5ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_tu(
@@ -365,7 +365,7 @@ void test_vloxseg5ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_tu(
@@ -384,7 +384,7 @@ void test_vloxseg5ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_tu(
@@ -403,7 +403,7 @@ void test_vloxseg5ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_tu(
@@ -422,7 +422,7 @@ void test_vloxseg5ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_tu(
@@ -441,7 +441,7 @@ void test_vloxseg5ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_tu(
@@ -460,7 +460,7 @@ void test_vloxseg5ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_tu(
@@ -479,7 +479,7 @@ void test_vloxseg5ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_tu(
@@ -498,7 +498,7 @@ void test_vloxseg5ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_tum(
@@ -517,7 +517,7 @@ void test_vloxseg5ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_tum(
@@ -536,7 +536,7 @@ void test_vloxseg5ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_tum(
@@ -555,7 +555,7 @@ void test_vloxseg5ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_tum(
@@ -574,7 +574,7 @@ void test_vloxseg5ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_tum(
@@ -593,7 +593,7 @@ void test_vloxseg5ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_tum(
@@ -612,7 +612,7 @@ void test_vloxseg5ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_tum(
@@ -631,7 +631,7 @@ void test_vloxseg5ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_tum(
@@ -650,7 +650,7 @@ void test_vloxseg5ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_tum(
@@ -669,7 +669,7 @@ void test_vloxseg5ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_tum(
@@ -688,7 +688,7 @@ void test_vloxseg5ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_tum(
@@ -707,7 +707,7 @@ void test_vloxseg5ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_tum(
@@ -726,7 +726,7 @@ void test_vloxseg5ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_tum(
@@ -745,7 +745,7 @@ void test_vloxseg5ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_tum(
@@ -764,7 +764,7 @@ void test_vloxseg5ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_tum(
@@ -783,7 +783,7 @@ void test_vloxseg5ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_tum(
@@ -802,7 +802,7 @@ void test_vloxseg5ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_tum(
@@ -821,7 +821,7 @@ void test_vloxseg5ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_tum(
@@ -840,7 +840,7 @@ void test_vloxseg5ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_tum(
@@ -859,7 +859,7 @@ void test_vloxseg5ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_tum(
@@ -878,7 +878,7 @@ void test_vloxseg5ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_tum(
@@ -897,7 +897,7 @@ void test_vloxseg5ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_tum(
@@ -916,7 +916,7 @@ void test_vloxseg5ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_tum(
@@ -935,7 +935,7 @@ void test_vloxseg5ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_tum(
@@ -954,7 +954,7 @@ void test_vloxseg5ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_tum(
@@ -973,7 +973,7 @@ void test_vloxseg5ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_tum(
@@ -992,7 +992,7 @@ void test_vloxseg5ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_tumu(
@@ -1011,7 +1011,7 @@ void test_vloxseg5ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_tumu(
@@ -1030,7 +1030,7 @@ void test_vloxseg5ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_tumu(
@@ -1049,7 +1049,7 @@ void test_vloxseg5ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_tumu(
@@ -1068,7 +1068,7 @@ void test_vloxseg5ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_tumu(
@@ -1087,7 +1087,7 @@ void test_vloxseg5ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_tumu(
@@ -1106,7 +1106,7 @@ void test_vloxseg5ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_tumu(
@@ -1125,7 +1125,7 @@ void test_vloxseg5ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_tumu(
@@ -1144,7 +1144,7 @@ void test_vloxseg5ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_tumu(
@@ -1163,7 +1163,7 @@ void test_vloxseg5ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_tumu(
@@ -1182,7 +1182,7 @@ void test_vloxseg5ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_tumu(
@@ -1201,7 +1201,7 @@ void test_vloxseg5ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_tumu(
@@ -1220,7 +1220,7 @@ void test_vloxseg5ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vloxseg5ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_tumu(
@@ -1258,7 +1258,7 @@ void test_vloxseg5ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_tumu(
@@ -1277,7 +1277,7 @@ void test_vloxseg5ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vloxseg5ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_tumu(
@@ -1315,7 +1315,7 @@ void test_vloxseg5ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_tumu(
@@ -1334,7 +1334,7 @@ void test_vloxseg5ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_tumu(
@@ -1353,7 +1353,7 @@ void test_vloxseg5ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_tumu(
@@ -1372,7 +1372,7 @@ void test_vloxseg5ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_tumu(
@@ -1391,7 +1391,7 @@ void test_vloxseg5ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_tumu(
@@ -1410,7 +1410,7 @@ void test_vloxseg5ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg5ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_tumu(
@@ -1448,7 +1448,7 @@ void test_vloxseg5ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_tumu(
@@ -1467,7 +1467,7 @@ void test_vloxseg5ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_tumu(
@@ -1486,7 +1486,7 @@ void test_vloxseg5ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_mu(
@@ -1505,7 +1505,7 @@ void test_vloxseg5ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_mu(
@@ -1524,7 +1524,7 @@ void test_vloxseg5ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_mu(
@@ -1543,7 +1543,7 @@ void test_vloxseg5ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_mu(
@@ -1562,7 +1562,7 @@ void test_vloxseg5ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_mu(
@@ -1581,7 +1581,7 @@ void test_vloxseg5ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_mu(
@@ -1600,7 +1600,7 @@ void test_vloxseg5ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_mu(
@@ -1619,7 +1619,7 @@ void test_vloxseg5ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_mu(
@@ -1638,7 +1638,7 @@ void test_vloxseg5ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_mu(
@@ -1657,7 +1657,7 @@ void test_vloxseg5ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_mu(
@@ -1676,7 +1676,7 @@ void test_vloxseg5ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_mu(
@@ -1695,7 +1695,7 @@ void test_vloxseg5ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_mu(
@@ -1714,7 +1714,7 @@ void test_vloxseg5ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_mu(
@@ -1733,7 +1733,7 @@ void test_vloxseg5ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_mu(
@@ -1752,7 +1752,7 @@ void test_vloxseg5ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_mu(
@@ -1771,7 +1771,7 @@ void test_vloxseg5ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_mu(
@@ -1790,7 +1790,7 @@ void test_vloxseg5ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_mu(
@@ -1809,7 +1809,7 @@ void test_vloxseg5ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_mu(
@@ -1828,7 +1828,7 @@ void test_vloxseg5ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_mu(
@@ -1847,7 +1847,7 @@ void test_vloxseg5ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_mu(
@@ -1866,7 +1866,7 @@ void test_vloxseg5ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_mu(
@@ -1885,7 +1885,7 @@ void test_vloxseg5ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_mu(
@@ -1904,7 +1904,7 @@ void test_vloxseg5ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_mu(
@@ -1923,7 +1923,7 @@ void test_vloxseg5ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_mu(
@@ -1942,7 +1942,7 @@ void test_vloxseg5ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_mu(
@@ -1961,7 +1961,7 @@ void test_vloxseg5ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_mu(
@@ -1980,6 +1980,6 @@ void test_vloxseg5ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c
index 03a9513031b5..e5228472a2b0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vloxseg5ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vloxseg5ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_tu(
@@ -80,7 +80,7 @@ void test_vloxseg5ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_tu(
@@ -99,7 +99,7 @@ void test_vloxseg5ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_tu(
@@ -118,7 +118,7 @@ void test_vloxseg5ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_tu(
@@ -137,7 +137,7 @@ void test_vloxseg5ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_tu(
@@ -156,7 +156,7 @@ void test_vloxseg5ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_tu(
@@ -175,7 +175,7 @@ void test_vloxseg5ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_tu(
@@ -194,7 +194,7 @@ void test_vloxseg5ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_tu(
@@ -213,7 +213,7 @@ void test_vloxseg5ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_tu(
@@ -232,7 +232,7 @@ void test_vloxseg5ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_tu(
@@ -251,7 +251,7 @@ void test_vloxseg5ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_tu(
@@ -270,7 +270,7 @@ void test_vloxseg5ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vloxseg5ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_tu(
@@ -308,7 +308,7 @@ void test_vloxseg5ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_tu(
@@ -327,7 +327,7 @@ void test_vloxseg5ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_tu(
@@ -346,7 +346,7 @@ void test_vloxseg5ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_tu(
@@ -365,7 +365,7 @@ void test_vloxseg5ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_tu(
@@ -384,7 +384,7 @@ void test_vloxseg5ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_tu(
@@ -403,7 +403,7 @@ void test_vloxseg5ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_tu(
@@ -422,7 +422,7 @@ void test_vloxseg5ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_tu(
@@ -441,7 +441,7 @@ void test_vloxseg5ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_tu(
@@ -460,7 +460,7 @@ void test_vloxseg5ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_tu(
@@ -479,7 +479,7 @@ void test_vloxseg5ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_tu(
@@ -498,7 +498,7 @@ void test_vloxseg5ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_tum(
@@ -517,7 +517,7 @@ void test_vloxseg5ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_tum(
@@ -536,7 +536,7 @@ void test_vloxseg5ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_tum(
@@ -555,7 +555,7 @@ void test_vloxseg5ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_tum(
@@ -574,7 +574,7 @@ void test_vloxseg5ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_tum(
@@ -593,7 +593,7 @@ void test_vloxseg5ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_tum(
@@ -612,7 +612,7 @@ void test_vloxseg5ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_tum(
@@ -631,7 +631,7 @@ void test_vloxseg5ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_tum(
@@ -650,7 +650,7 @@ void test_vloxseg5ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_tum(
@@ -669,7 +669,7 @@ void test_vloxseg5ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_tum(
@@ -688,7 +688,7 @@ void test_vloxseg5ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_tum(
@@ -707,7 +707,7 @@ void test_vloxseg5ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_tum(
@@ -726,7 +726,7 @@ void test_vloxseg5ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_tum(
@@ -745,7 +745,7 @@ void test_vloxseg5ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_tum(
@@ -764,7 +764,7 @@ void test_vloxseg5ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_tum(
@@ -783,7 +783,7 @@ void test_vloxseg5ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_tum(
@@ -802,7 +802,7 @@ void test_vloxseg5ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_tum(
@@ -821,7 +821,7 @@ void test_vloxseg5ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_tum(
@@ -840,7 +840,7 @@ void test_vloxseg5ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_tum(
@@ -859,7 +859,7 @@ void test_vloxseg5ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_tum(
@@ -878,7 +878,7 @@ void test_vloxseg5ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_tum(
@@ -897,7 +897,7 @@ void test_vloxseg5ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_tum(
@@ -916,7 +916,7 @@ void test_vloxseg5ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_tum(
@@ -935,7 +935,7 @@ void test_vloxseg5ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_tum(
@@ -954,7 +954,7 @@ void test_vloxseg5ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_tum(
@@ -973,7 +973,7 @@ void test_vloxseg5ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_tum(
@@ -992,7 +992,7 @@ void test_vloxseg5ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_tumu(
@@ -1011,7 +1011,7 @@ void test_vloxseg5ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_tumu(
@@ -1030,7 +1030,7 @@ void test_vloxseg5ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_tumu(
@@ -1049,7 +1049,7 @@ void test_vloxseg5ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_tumu(
@@ -1068,7 +1068,7 @@ void test_vloxseg5ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_tumu(
@@ -1087,7 +1087,7 @@ void test_vloxseg5ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_tumu(
@@ -1106,7 +1106,7 @@ void test_vloxseg5ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_tumu(
@@ -1125,7 +1125,7 @@ void test_vloxseg5ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_tumu(
@@ -1144,7 +1144,7 @@ void test_vloxseg5ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_tumu(
@@ -1163,7 +1163,7 @@ void test_vloxseg5ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_tumu(
@@ -1182,7 +1182,7 @@ void test_vloxseg5ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_tumu(
@@ -1201,7 +1201,7 @@ void test_vloxseg5ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_tumu(
@@ -1220,7 +1220,7 @@ void test_vloxseg5ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vloxseg5ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_tumu(
@@ -1258,7 +1258,7 @@ void test_vloxseg5ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_tumu(
@@ -1277,7 +1277,7 @@ void test_vloxseg5ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vloxseg5ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_tumu(
@@ -1315,7 +1315,7 @@ void test_vloxseg5ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_tumu(
@@ -1334,7 +1334,7 @@ void test_vloxseg5ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_tumu(
@@ -1353,7 +1353,7 @@ void test_vloxseg5ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_tumu(
@@ -1372,7 +1372,7 @@ void test_vloxseg5ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_tumu(
@@ -1391,7 +1391,7 @@ void test_vloxseg5ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_tumu(
@@ -1410,7 +1410,7 @@ void test_vloxseg5ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg5ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_tumu(
@@ -1448,7 +1448,7 @@ void test_vloxseg5ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_tumu(
@@ -1467,7 +1467,7 @@ void test_vloxseg5ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_tumu(
@@ -1486,7 +1486,7 @@ void test_vloxseg5ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_mu(
@@ -1505,7 +1505,7 @@ void test_vloxseg5ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_mu(
@@ -1524,7 +1524,7 @@ void test_vloxseg5ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_mu(
@@ -1543,7 +1543,7 @@ void test_vloxseg5ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_mu(
@@ -1562,7 +1562,7 @@ void test_vloxseg5ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_mu(
@@ -1581,7 +1581,7 @@ void test_vloxseg5ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_mu(
@@ -1600,7 +1600,7 @@ void test_vloxseg5ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_mu(
@@ -1619,7 +1619,7 @@ void test_vloxseg5ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_mu(
@@ -1638,7 +1638,7 @@ void test_vloxseg5ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_mu(
@@ -1657,7 +1657,7 @@ void test_vloxseg5ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_mu(
@@ -1676,7 +1676,7 @@ void test_vloxseg5ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_mu(
@@ -1695,7 +1695,7 @@ void test_vloxseg5ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_mu(
@@ -1714,7 +1714,7 @@ void test_vloxseg5ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_mu(
@@ -1733,7 +1733,7 @@ void test_vloxseg5ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_mu(
@@ -1752,7 +1752,7 @@ void test_vloxseg5ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_mu(
@@ -1771,7 +1771,7 @@ void test_vloxseg5ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_mu(
@@ -1790,7 +1790,7 @@ void test_vloxseg5ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_mu(
@@ -1809,7 +1809,7 @@ void test_vloxseg5ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_mu(
@@ -1828,7 +1828,7 @@ void test_vloxseg5ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_mu(
@@ -1847,7 +1847,7 @@ void test_vloxseg5ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_mu(
@@ -1866,7 +1866,7 @@ void test_vloxseg5ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_mu(
@@ -1885,7 +1885,7 @@ void test_vloxseg5ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_mu(
@@ -1904,7 +1904,7 @@ void test_vloxseg5ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_mu(
@@ -1923,7 +1923,7 @@ void test_vloxseg5ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_mu(
@@ -1942,7 +1942,7 @@ void test_vloxseg5ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_mu(
@@ -1961,7 +1961,7 @@ void test_vloxseg5ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_mu(
@@ -1980,6 +1980,6 @@ void test_vloxseg5ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c
index f44f16068195..839953d8b61b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vloxseg5ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vloxseg5ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_tu(
@@ -80,7 +80,7 @@ void test_vloxseg5ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_tu(
@@ -99,7 +99,7 @@ void test_vloxseg5ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_tu(
@@ -118,7 +118,7 @@ void test_vloxseg5ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_tu(
@@ -137,7 +137,7 @@ void test_vloxseg5ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_tu(
@@ -156,7 +156,7 @@ void test_vloxseg5ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_tu(
@@ -175,7 +175,7 @@ void test_vloxseg5ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_tu(
@@ -194,7 +194,7 @@ void test_vloxseg5ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_tu(
@@ -213,7 +213,7 @@ void test_vloxseg5ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_tu(
@@ -232,7 +232,7 @@ void test_vloxseg5ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_tu(
@@ -251,7 +251,7 @@ void test_vloxseg5ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_tu(
@@ -270,7 +270,7 @@ void test_vloxseg5ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vloxseg5ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_tu(
@@ -308,7 +308,7 @@ void test_vloxseg5ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_tu(
@@ -327,7 +327,7 @@ void test_vloxseg5ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_tu(
@@ -346,7 +346,7 @@ void test_vloxseg5ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_tu(
@@ -365,7 +365,7 @@ void test_vloxseg5ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_tu(
@@ -384,7 +384,7 @@ void test_vloxseg5ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_tu(
@@ -403,7 +403,7 @@ void test_vloxseg5ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_tu(
@@ -422,7 +422,7 @@ void test_vloxseg5ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_tu(
@@ -441,7 +441,7 @@ void test_vloxseg5ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_tu(
@@ -460,7 +460,7 @@ void test_vloxseg5ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_tu(
@@ -479,7 +479,7 @@ void test_vloxseg5ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_tu(
@@ -498,7 +498,7 @@ void test_vloxseg5ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_tum(
@@ -517,7 +517,7 @@ void test_vloxseg5ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_tum(
@@ -536,7 +536,7 @@ void test_vloxseg5ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_tum(
@@ -555,7 +555,7 @@ void test_vloxseg5ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_tum(
@@ -574,7 +574,7 @@ void test_vloxseg5ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_tum(
@@ -593,7 +593,7 @@ void test_vloxseg5ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_tum(
@@ -612,7 +612,7 @@ void test_vloxseg5ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_tum(
@@ -631,7 +631,7 @@ void test_vloxseg5ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_tum(
@@ -650,7 +650,7 @@ void test_vloxseg5ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_tum(
@@ -669,7 +669,7 @@ void test_vloxseg5ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_tum(
@@ -688,7 +688,7 @@ void test_vloxseg5ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_tum(
@@ -707,7 +707,7 @@ void test_vloxseg5ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_tum(
@@ -726,7 +726,7 @@ void test_vloxseg5ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_tum(
@@ -745,7 +745,7 @@ void test_vloxseg5ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_tum(
@@ -764,7 +764,7 @@ void test_vloxseg5ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_tum(
@@ -783,7 +783,7 @@ void test_vloxseg5ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_tum(
@@ -802,7 +802,7 @@ void test_vloxseg5ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_tum(
@@ -821,7 +821,7 @@ void test_vloxseg5ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_tum(
@@ -840,7 +840,7 @@ void test_vloxseg5ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_tum(
@@ -859,7 +859,7 @@ void test_vloxseg5ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_tum(
@@ -878,7 +878,7 @@ void test_vloxseg5ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_tum(
@@ -897,7 +897,7 @@ void test_vloxseg5ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_tum(
@@ -916,7 +916,7 @@ void test_vloxseg5ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_tum(
@@ -935,7 +935,7 @@ void test_vloxseg5ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_tum(
@@ -954,7 +954,7 @@ void test_vloxseg5ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_tum(
@@ -973,7 +973,7 @@ void test_vloxseg5ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_tum(
@@ -992,7 +992,7 @@ void test_vloxseg5ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_tumu(
@@ -1011,7 +1011,7 @@ void test_vloxseg5ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_tumu(
@@ -1030,7 +1030,7 @@ void test_vloxseg5ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_tumu(
@@ -1049,7 +1049,7 @@ void test_vloxseg5ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_tumu(
@@ -1068,7 +1068,7 @@ void test_vloxseg5ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_tumu(
@@ -1087,7 +1087,7 @@ void test_vloxseg5ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_tumu(
@@ -1106,7 +1106,7 @@ void test_vloxseg5ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_tumu(
@@ -1125,7 +1125,7 @@ void test_vloxseg5ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_tumu(
@@ -1144,7 +1144,7 @@ void test_vloxseg5ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_tumu(
@@ -1163,7 +1163,7 @@ void test_vloxseg5ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_tumu(
@@ -1182,7 +1182,7 @@ void test_vloxseg5ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_tumu(
@@ -1201,7 +1201,7 @@ void test_vloxseg5ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_tumu(
@@ -1220,7 +1220,7 @@ void test_vloxseg5ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vloxseg5ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_tumu(
@@ -1258,7 +1258,7 @@ void test_vloxseg5ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_tumu(
@@ -1277,7 +1277,7 @@ void test_vloxseg5ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vloxseg5ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_tumu(
@@ -1315,7 +1315,7 @@ void test_vloxseg5ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_tumu(
@@ -1334,7 +1334,7 @@ void test_vloxseg5ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_tumu(
@@ -1353,7 +1353,7 @@ void test_vloxseg5ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_tumu(
@@ -1372,7 +1372,7 @@ void test_vloxseg5ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_tumu(
@@ -1391,7 +1391,7 @@ void test_vloxseg5ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_tumu(
@@ -1410,7 +1410,7 @@ void test_vloxseg5ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg5ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_tumu(
@@ -1448,7 +1448,7 @@ void test_vloxseg5ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_tumu(
@@ -1467,7 +1467,7 @@ void test_vloxseg5ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_tumu(
@@ -1486,7 +1486,7 @@ void test_vloxseg5ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_mu(
@@ -1505,7 +1505,7 @@ void test_vloxseg5ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_mu(
@@ -1524,7 +1524,7 @@ void test_vloxseg5ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_mu(
@@ -1543,7 +1543,7 @@ void test_vloxseg5ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_mu(
@@ -1562,7 +1562,7 @@ void test_vloxseg5ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_mu(
@@ -1581,7 +1581,7 @@ void test_vloxseg5ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_mu(
@@ -1600,7 +1600,7 @@ void test_vloxseg5ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_mu(
@@ -1619,7 +1619,7 @@ void test_vloxseg5ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_mu(
@@ -1638,7 +1638,7 @@ void test_vloxseg5ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_mu(
@@ -1657,7 +1657,7 @@ void test_vloxseg5ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_mu(
@@ -1676,7 +1676,7 @@ void test_vloxseg5ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_mu(
@@ -1695,7 +1695,7 @@ void test_vloxseg5ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_mu(
@@ -1714,7 +1714,7 @@ void test_vloxseg5ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_mu(
@@ -1733,7 +1733,7 @@ void test_vloxseg5ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_mu(
@@ -1752,7 +1752,7 @@ void test_vloxseg5ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_mu(
@@ -1771,7 +1771,7 @@ void test_vloxseg5ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_mu(
@@ -1790,7 +1790,7 @@ void test_vloxseg5ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_mu(
@@ -1809,7 +1809,7 @@ void test_vloxseg5ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_mu(
@@ -1828,7 +1828,7 @@ void test_vloxseg5ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_mu(
@@ -1847,7 +1847,7 @@ void test_vloxseg5ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_mu(
@@ -1866,7 +1866,7 @@ void test_vloxseg5ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_mu(
@@ -1885,7 +1885,7 @@ void test_vloxseg5ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_mu(
@@ -1904,7 +1904,7 @@ void test_vloxseg5ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_mu(
@@ -1923,7 +1923,7 @@ void test_vloxseg5ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_mu(
@@ -1942,7 +1942,7 @@ void test_vloxseg5ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_mu(
@@ -1961,7 +1961,7 @@ void test_vloxseg5ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_mu(
@@ -1980,6 +1980,6 @@ void test_vloxseg5ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c
index f68d1fbfc439..632dfec579f5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vloxseg5ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vloxseg5ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_tu(
@@ -80,7 +80,7 @@ void test_vloxseg5ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_tu(
@@ -99,7 +99,7 @@ void test_vloxseg5ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_tu(
@@ -118,7 +118,7 @@ void test_vloxseg5ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_tu(
@@ -137,7 +137,7 @@ void test_vloxseg5ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_tu(
@@ -156,7 +156,7 @@ void test_vloxseg5ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_tu(
@@ -175,7 +175,7 @@ void test_vloxseg5ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_tu(
@@ -194,7 +194,7 @@ void test_vloxseg5ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_tu(
@@ -213,7 +213,7 @@ void test_vloxseg5ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_tu(
@@ -232,7 +232,7 @@ void test_vloxseg5ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_tu(
@@ -251,7 +251,7 @@ void test_vloxseg5ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_tu(
@@ -270,7 +270,7 @@ void test_vloxseg5ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vloxseg5ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_tu(
@@ -308,7 +308,7 @@ void test_vloxseg5ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_tu(
@@ -327,7 +327,7 @@ void test_vloxseg5ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_tu(
@@ -346,7 +346,7 @@ void test_vloxseg5ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_tu(
@@ -365,7 +365,7 @@ void test_vloxseg5ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_tu(
@@ -384,7 +384,7 @@ void test_vloxseg5ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_tu(
@@ -403,7 +403,7 @@ void test_vloxseg5ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_tu(
@@ -422,7 +422,7 @@ void test_vloxseg5ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_tu(
@@ -441,7 +441,7 @@ void test_vloxseg5ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_tu(
@@ -460,7 +460,7 @@ void test_vloxseg5ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_tu(
@@ -479,7 +479,7 @@ void test_vloxseg5ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_tu(
@@ -498,7 +498,7 @@ void test_vloxseg5ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_tum(
@@ -517,7 +517,7 @@ void test_vloxseg5ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_tum(
@@ -536,7 +536,7 @@ void test_vloxseg5ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_tum(
@@ -555,7 +555,7 @@ void test_vloxseg5ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_tum(
@@ -574,7 +574,7 @@ void test_vloxseg5ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_tum(
@@ -593,7 +593,7 @@ void test_vloxseg5ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_tum(
@@ -612,7 +612,7 @@ void test_vloxseg5ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_tum(
@@ -631,7 +631,7 @@ void test_vloxseg5ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_tum(
@@ -650,7 +650,7 @@ void test_vloxseg5ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_tum(
@@ -669,7 +669,7 @@ void test_vloxseg5ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_tum(
@@ -688,7 +688,7 @@ void test_vloxseg5ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_tum(
@@ -707,7 +707,7 @@ void test_vloxseg5ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_tum(
@@ -726,7 +726,7 @@ void test_vloxseg5ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_tum(
@@ -745,7 +745,7 @@ void test_vloxseg5ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_tum(
@@ -764,7 +764,7 @@ void test_vloxseg5ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_tum(
@@ -783,7 +783,7 @@ void test_vloxseg5ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_tum(
@@ -802,7 +802,7 @@ void test_vloxseg5ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_tum(
@@ -821,7 +821,7 @@ void test_vloxseg5ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_tum(
@@ -840,7 +840,7 @@ void test_vloxseg5ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_tum(
@@ -859,7 +859,7 @@ void test_vloxseg5ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_tum(
@@ -878,7 +878,7 @@ void test_vloxseg5ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_tum(
@@ -897,7 +897,7 @@ void test_vloxseg5ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_tum(
@@ -916,7 +916,7 @@ void test_vloxseg5ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_tum(
@@ -935,7 +935,7 @@ void test_vloxseg5ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_tum(
@@ -954,7 +954,7 @@ void test_vloxseg5ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_tum(
@@ -973,7 +973,7 @@ void test_vloxseg5ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_tum(
@@ -992,7 +992,7 @@ void test_vloxseg5ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_tumu(
@@ -1011,7 +1011,7 @@ void test_vloxseg5ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_tumu(
@@ -1030,7 +1030,7 @@ void test_vloxseg5ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_tumu(
@@ -1049,7 +1049,7 @@ void test_vloxseg5ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_tumu(
@@ -1068,7 +1068,7 @@ void test_vloxseg5ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_tumu(
@@ -1087,7 +1087,7 @@ void test_vloxseg5ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_tumu(
@@ -1106,7 +1106,7 @@ void test_vloxseg5ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_tumu(
@@ -1125,7 +1125,7 @@ void test_vloxseg5ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_tumu(
@@ -1144,7 +1144,7 @@ void test_vloxseg5ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_tumu(
@@ -1163,7 +1163,7 @@ void test_vloxseg5ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_tumu(
@@ -1182,7 +1182,7 @@ void test_vloxseg5ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_tumu(
@@ -1201,7 +1201,7 @@ void test_vloxseg5ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_tumu(
@@ -1220,7 +1220,7 @@ void test_vloxseg5ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vloxseg5ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_tumu(
@@ -1258,7 +1258,7 @@ void test_vloxseg5ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_tumu(
@@ -1277,7 +1277,7 @@ void test_vloxseg5ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vloxseg5ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_tumu(
@@ -1315,7 +1315,7 @@ void test_vloxseg5ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_tumu(
@@ -1334,7 +1334,7 @@ void test_vloxseg5ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_tumu(
@@ -1353,7 +1353,7 @@ void test_vloxseg5ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_tumu(
@@ -1372,7 +1372,7 @@ void test_vloxseg5ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_tumu(
@@ -1391,7 +1391,7 @@ void test_vloxseg5ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_tumu(
@@ -1410,7 +1410,7 @@ void test_vloxseg5ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg5ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_tumu(
@@ -1448,7 +1448,7 @@ void test_vloxseg5ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_tumu(
@@ -1467,7 +1467,7 @@ void test_vloxseg5ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_tumu(
@@ -1486,7 +1486,7 @@ void test_vloxseg5ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_mu(
@@ -1505,7 +1505,7 @@ void test_vloxseg5ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_mu(
@@ -1524,7 +1524,7 @@ void test_vloxseg5ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_mu(
@@ -1543,7 +1543,7 @@ void test_vloxseg5ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_mu(
@@ -1562,7 +1562,7 @@ void test_vloxseg5ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_mu(
@@ -1581,7 +1581,7 @@ void test_vloxseg5ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_mu(
@@ -1600,7 +1600,7 @@ void test_vloxseg5ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_mu(
@@ -1619,7 +1619,7 @@ void test_vloxseg5ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_mu(
@@ -1638,7 +1638,7 @@ void test_vloxseg5ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_mu(
@@ -1657,7 +1657,7 @@ void test_vloxseg5ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_mu(
@@ -1676,7 +1676,7 @@ void test_vloxseg5ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_mu(
@@ -1695,7 +1695,7 @@ void test_vloxseg5ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_mu(
@@ -1714,7 +1714,7 @@ void test_vloxseg5ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_mu(
@@ -1733,7 +1733,7 @@ void test_vloxseg5ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_mu(
@@ -1752,7 +1752,7 @@ void test_vloxseg5ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_mu(
@@ -1771,7 +1771,7 @@ void test_vloxseg5ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_mu(
@@ -1790,7 +1790,7 @@ void test_vloxseg5ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_mu(
@@ -1809,7 +1809,7 @@ void test_vloxseg5ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_mu(
@@ -1828,7 +1828,7 @@ void test_vloxseg5ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_mu(
@@ -1847,7 +1847,7 @@ void test_vloxseg5ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_mu(
@@ -1866,7 +1866,7 @@ void test_vloxseg5ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_mu(
@@ -1885,7 +1885,7 @@ void test_vloxseg5ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_mu(
@@ -1904,7 +1904,7 @@ void test_vloxseg5ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_mu(
@@ -1923,7 +1923,7 @@ void test_vloxseg5ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_mu(
@@ -1942,7 +1942,7 @@ void test_vloxseg5ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_mu(
@@ -1961,7 +1961,7 @@ void test_vloxseg5ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_mu(
@@ -1980,6 +1980,6 @@ void test_vloxseg5ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg5ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vloxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c
index a335c90be255..169d38fee157 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vloxseg6ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vloxseg6ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_tu(
@@ -88,7 +88,7 @@ void test_vloxseg6ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_tu(
@@ -109,7 +109,7 @@ void test_vloxseg6ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_tu(
@@ -130,7 +130,7 @@ void test_vloxseg6ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_tu(
@@ -151,7 +151,7 @@ void test_vloxseg6ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_tu(
@@ -172,7 +172,7 @@ void test_vloxseg6ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_tu(
@@ -193,7 +193,7 @@ void test_vloxseg6ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_tu(
@@ -214,7 +214,7 @@ void test_vloxseg6ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_tu(
@@ -235,7 +235,7 @@ void test_vloxseg6ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_tu(
@@ -256,7 +256,7 @@ void test_vloxseg6ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vloxseg6ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_tu(
@@ -298,7 +298,7 @@ void test_vloxseg6ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_tu(
@@ -319,7 +319,7 @@ void test_vloxseg6ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_tu(
@@ -340,7 +340,7 @@ void test_vloxseg6ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_tu(
@@ -361,7 +361,7 @@ void test_vloxseg6ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_tu(
@@ -382,7 +382,7 @@ void test_vloxseg6ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_tu(
@@ -403,7 +403,7 @@ void test_vloxseg6ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_tu(
@@ -424,7 +424,7 @@ void test_vloxseg6ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_tu(
@@ -445,7 +445,7 @@ void test_vloxseg6ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_tu(
@@ -466,7 +466,7 @@ void test_vloxseg6ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_tu(
@@ -487,7 +487,7 @@ void test_vloxseg6ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_tu(
@@ -508,7 +508,7 @@ void test_vloxseg6ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_tu(
@@ -529,7 +529,7 @@ void test_vloxseg6ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_tu(
@@ -550,7 +550,7 @@ void test_vloxseg6ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_tum(
@@ -571,7 +571,7 @@ void test_vloxseg6ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_tum(
@@ -592,7 +592,7 @@ void test_vloxseg6ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_tum(
@@ -613,7 +613,7 @@ void test_vloxseg6ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vloxseg6ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_tum(
@@ -655,7 +655,7 @@ void test_vloxseg6ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_tum(
@@ -676,7 +676,7 @@ void test_vloxseg6ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_tum(
@@ -697,7 +697,7 @@ void test_vloxseg6ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_tum(
@@ -718,7 +718,7 @@ void test_vloxseg6ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vloxseg6ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_tum(
@@ -760,7 +760,7 @@ void test_vloxseg6ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_tum(
@@ -781,7 +781,7 @@ void test_vloxseg6ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_tum(
@@ -802,7 +802,7 @@ void test_vloxseg6ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_tum(
@@ -823,7 +823,7 @@ void test_vloxseg6ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vloxseg6ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_tum(
@@ -865,7 +865,7 @@ void test_vloxseg6ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_tum(
@@ -886,7 +886,7 @@ void test_vloxseg6ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_tum(
@@ -907,7 +907,7 @@ void test_vloxseg6ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_tum(
@@ -928,7 +928,7 @@ void test_vloxseg6ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vloxseg6ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_tum(
@@ -970,7 +970,7 @@ void test_vloxseg6ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_tum(
@@ -991,7 +991,7 @@ void test_vloxseg6ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_tum(
@@ -1012,7 +1012,7 @@ void test_vloxseg6ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_tum(
@@ -1033,7 +1033,7 @@ void test_vloxseg6ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg6ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_tum(
@@ -1075,7 +1075,7 @@ void test_vloxseg6ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_tum(
@@ -1096,7 +1096,7 @@ void test_vloxseg6ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_tumu(
@@ -1117,7 +1117,7 @@ void test_vloxseg6ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_tumu(
@@ -1138,7 +1138,7 @@ void test_vloxseg6ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vloxseg6ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_tumu(
@@ -1180,7 +1180,7 @@ void test_vloxseg6ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_tumu(
@@ -1201,7 +1201,7 @@ void test_vloxseg6ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_tumu(
@@ -1222,7 +1222,7 @@ void test_vloxseg6ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_tumu(
@@ -1243,7 +1243,7 @@ void test_vloxseg6ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vloxseg6ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_tumu(
@@ -1285,7 +1285,7 @@ void test_vloxseg6ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_tumu(
@@ -1306,7 +1306,7 @@ void test_vloxseg6ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_tumu(
@@ -1327,7 +1327,7 @@ void test_vloxseg6ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_tumu(
@@ -1348,7 +1348,7 @@ void test_vloxseg6ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg6ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_tumu(
@@ -1390,7 +1390,7 @@ void test_vloxseg6ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_tumu(
@@ -1411,7 +1411,7 @@ void test_vloxseg6ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_tumu(
@@ -1432,7 +1432,7 @@ void test_vloxseg6ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_tumu(
@@ -1453,7 +1453,7 @@ void test_vloxseg6ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_tumu(
@@ -1474,7 +1474,7 @@ void test_vloxseg6ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_tumu(
@@ -1495,7 +1495,7 @@ void test_vloxseg6ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_tumu(
@@ -1516,7 +1516,7 @@ void test_vloxseg6ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_tumu(
@@ -1537,7 +1537,7 @@ void test_vloxseg6ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_tumu(
@@ -1558,7 +1558,7 @@ void test_vloxseg6ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg6ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_tumu(
@@ -1600,7 +1600,7 @@ void test_vloxseg6ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_tumu(
@@ -1621,7 +1621,7 @@ void test_vloxseg6ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_tumu(
@@ -1642,7 +1642,7 @@ void test_vloxseg6ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_mu(
@@ -1663,7 +1663,7 @@ void test_vloxseg6ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_mu(
@@ -1684,7 +1684,7 @@ void test_vloxseg6ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_mu(
@@ -1705,7 +1705,7 @@ void test_vloxseg6ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_mu(
@@ -1726,7 +1726,7 @@ void test_vloxseg6ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_mu(
@@ -1747,7 +1747,7 @@ void test_vloxseg6ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_mu(
@@ -1768,7 +1768,7 @@ void test_vloxseg6ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_mu(
@@ -1789,7 +1789,7 @@ void test_vloxseg6ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_mu(
@@ -1810,7 +1810,7 @@ void test_vloxseg6ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_mu(
@@ -1831,7 +1831,7 @@ void test_vloxseg6ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_mu(
@@ -1852,7 +1852,7 @@ void test_vloxseg6ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_mu(
@@ -1873,7 +1873,7 @@ void test_vloxseg6ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_mu(
@@ -1894,7 +1894,7 @@ void test_vloxseg6ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vloxseg6ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_mu(
@@ -1936,7 +1936,7 @@ void test_vloxseg6ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_mu(
@@ -1957,7 +1957,7 @@ void test_vloxseg6ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_mu(
@@ -1978,7 +1978,7 @@ void test_vloxseg6ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_mu(
@@ -1999,7 +1999,7 @@ void test_vloxseg6ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_mu(
@@ -2020,7 +2020,7 @@ void test_vloxseg6ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_mu(
@@ -2041,7 +2041,7 @@ void test_vloxseg6ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_mu(
@@ -2062,7 +2062,7 @@ void test_vloxseg6ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_mu(
@@ -2083,7 +2083,7 @@ void test_vloxseg6ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg6ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_mu(
@@ -2125,7 +2125,7 @@ void test_vloxseg6ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_mu(
@@ -2146,7 +2146,7 @@ void test_vloxseg6ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_mu(
@@ -2167,7 +2167,7 @@ void test_vloxseg6ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_mu(
@@ -2188,6 +2188,6 @@ void test_vloxseg6ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c
index bfbbcdd1721a..24e54c576391 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vloxseg6ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vloxseg6ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_tu(
@@ -88,7 +88,7 @@ void test_vloxseg6ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_tu(
@@ -109,7 +109,7 @@ void test_vloxseg6ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_tu(
@@ -130,7 +130,7 @@ void test_vloxseg6ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_tu(
@@ -151,7 +151,7 @@ void test_vloxseg6ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_tu(
@@ -172,7 +172,7 @@ void test_vloxseg6ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_tu(
@@ -193,7 +193,7 @@ void test_vloxseg6ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_tu(
@@ -214,7 +214,7 @@ void test_vloxseg6ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_tu(
@@ -235,7 +235,7 @@ void test_vloxseg6ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_tu(
@@ -256,7 +256,7 @@ void test_vloxseg6ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vloxseg6ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_tu(
@@ -298,7 +298,7 @@ void test_vloxseg6ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_tu(
@@ -319,7 +319,7 @@ void test_vloxseg6ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_tu(
@@ -340,7 +340,7 @@ void test_vloxseg6ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_tu(
@@ -361,7 +361,7 @@ void test_vloxseg6ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_tu(
@@ -382,7 +382,7 @@ void test_vloxseg6ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_tu(
@@ -403,7 +403,7 @@ void test_vloxseg6ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_tu(
@@ -424,7 +424,7 @@ void test_vloxseg6ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_tu(
@@ -445,7 +445,7 @@ void test_vloxseg6ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_tu(
@@ -466,7 +466,7 @@ void test_vloxseg6ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_tu(
@@ -487,7 +487,7 @@ void test_vloxseg6ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_tu(
@@ -508,7 +508,7 @@ void test_vloxseg6ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_tu(
@@ -529,7 +529,7 @@ void test_vloxseg6ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_tu(
@@ -550,7 +550,7 @@ void test_vloxseg6ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_tum(
@@ -571,7 +571,7 @@ void test_vloxseg6ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_tum(
@@ -592,7 +592,7 @@ void test_vloxseg6ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_tum(
@@ -613,7 +613,7 @@ void test_vloxseg6ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vloxseg6ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_tum(
@@ -655,7 +655,7 @@ void test_vloxseg6ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_tum(
@@ -676,7 +676,7 @@ void test_vloxseg6ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_tum(
@@ -697,7 +697,7 @@ void test_vloxseg6ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_tum(
@@ -718,7 +718,7 @@ void test_vloxseg6ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vloxseg6ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_tum(
@@ -760,7 +760,7 @@ void test_vloxseg6ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_tum(
@@ -781,7 +781,7 @@ void test_vloxseg6ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_tum(
@@ -802,7 +802,7 @@ void test_vloxseg6ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_tum(
@@ -823,7 +823,7 @@ void test_vloxseg6ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vloxseg6ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_tum(
@@ -865,7 +865,7 @@ void test_vloxseg6ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_tum(
@@ -886,7 +886,7 @@ void test_vloxseg6ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_tum(
@@ -907,7 +907,7 @@ void test_vloxseg6ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_tum(
@@ -928,7 +928,7 @@ void test_vloxseg6ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vloxseg6ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_tum(
@@ -970,7 +970,7 @@ void test_vloxseg6ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_tum(
@@ -991,7 +991,7 @@ void test_vloxseg6ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_tum(
@@ -1012,7 +1012,7 @@ void test_vloxseg6ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_tum(
@@ -1033,7 +1033,7 @@ void test_vloxseg6ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg6ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_tum(
@@ -1075,7 +1075,7 @@ void test_vloxseg6ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_tum(
@@ -1096,7 +1096,7 @@ void test_vloxseg6ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_tumu(
@@ -1117,7 +1117,7 @@ void test_vloxseg6ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_tumu(
@@ -1138,7 +1138,7 @@ void test_vloxseg6ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vloxseg6ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_tumu(
@@ -1180,7 +1180,7 @@ void test_vloxseg6ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_tumu(
@@ -1201,7 +1201,7 @@ void test_vloxseg6ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_tumu(
@@ -1222,7 +1222,7 @@ void test_vloxseg6ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_tumu(
@@ -1243,7 +1243,7 @@ void test_vloxseg6ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vloxseg6ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_tumu(
@@ -1285,7 +1285,7 @@ void test_vloxseg6ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_tumu(
@@ -1306,7 +1306,7 @@ void test_vloxseg6ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_tumu(
@@ -1327,7 +1327,7 @@ void test_vloxseg6ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_tumu(
@@ -1348,7 +1348,7 @@ void test_vloxseg6ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg6ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_tumu(
@@ -1390,7 +1390,7 @@ void test_vloxseg6ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_tumu(
@@ -1411,7 +1411,7 @@ void test_vloxseg6ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_tumu(
@@ -1432,7 +1432,7 @@ void test_vloxseg6ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_tumu(
@@ -1453,7 +1453,7 @@ void test_vloxseg6ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_tumu(
@@ -1474,7 +1474,7 @@ void test_vloxseg6ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_tumu(
@@ -1495,7 +1495,7 @@ void test_vloxseg6ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_tumu(
@@ -1516,7 +1516,7 @@ void test_vloxseg6ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_tumu(
@@ -1537,7 +1537,7 @@ void test_vloxseg6ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_tumu(
@@ -1558,7 +1558,7 @@ void test_vloxseg6ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg6ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_tumu(
@@ -1600,7 +1600,7 @@ void test_vloxseg6ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_tumu(
@@ -1621,7 +1621,7 @@ void test_vloxseg6ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_tumu(
@@ -1642,7 +1642,7 @@ void test_vloxseg6ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_mu(
@@ -1663,7 +1663,7 @@ void test_vloxseg6ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_mu(
@@ -1684,7 +1684,7 @@ void test_vloxseg6ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_mu(
@@ -1705,7 +1705,7 @@ void test_vloxseg6ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_mu(
@@ -1726,7 +1726,7 @@ void test_vloxseg6ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_mu(
@@ -1747,7 +1747,7 @@ void test_vloxseg6ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_mu(
@@ -1768,7 +1768,7 @@ void test_vloxseg6ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_mu(
@@ -1789,7 +1789,7 @@ void test_vloxseg6ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_mu(
@@ -1810,7 +1810,7 @@ void test_vloxseg6ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_mu(
@@ -1831,7 +1831,7 @@ void test_vloxseg6ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_mu(
@@ -1852,7 +1852,7 @@ void test_vloxseg6ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_mu(
@@ -1873,7 +1873,7 @@ void test_vloxseg6ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_mu(
@@ -1894,7 +1894,7 @@ void test_vloxseg6ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vloxseg6ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_mu(
@@ -1936,7 +1936,7 @@ void test_vloxseg6ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_mu(
@@ -1957,7 +1957,7 @@ void test_vloxseg6ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_mu(
@@ -1978,7 +1978,7 @@ void test_vloxseg6ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_mu(
@@ -1999,7 +1999,7 @@ void test_vloxseg6ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_mu(
@@ -2020,7 +2020,7 @@ void test_vloxseg6ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_mu(
@@ -2041,7 +2041,7 @@ void test_vloxseg6ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_mu(
@@ -2062,7 +2062,7 @@ void test_vloxseg6ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_mu(
@@ -2083,7 +2083,7 @@ void test_vloxseg6ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg6ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_mu(
@@ -2125,7 +2125,7 @@ void test_vloxseg6ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_mu(
@@ -2146,7 +2146,7 @@ void test_vloxseg6ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_mu(
@@ -2167,7 +2167,7 @@ void test_vloxseg6ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_mu(
@@ -2188,6 +2188,6 @@ void test_vloxseg6ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
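Every hunk above applies the same mechanical rename: the overloaded RVV intrinsic call gains the __riscv_ prefix while its argument list stays identical. As a minimal sketch of what calling code looks like after this change (the function name demo_mu_call is illustrative and not part of the patch; the vector types and the __riscv_vloxseg6ei32_mu overload are declared by <riscv_vector.h> when compiling for a target with the V extension, e.g. clang -march=rv64gcv):

#include <riscv_vector.h>

// Illustrative caller (demo_mu_call and its parameter names are not part of
// the patch); it mirrors test_vloxseg6ei32_v_i8mf8_mu above. The only
// user-visible change in this patch-set is the __riscv_ prefix on the
// overloaded spelling; the arguments are passed through unchanged.
void demo_mu_call(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
                  vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5,
                  vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1,
                  vint8mf8_t maskedoff2, vint8mf8_t maskedoff3,
                  vint8mf8_t maskedoff4, vint8mf8_t maskedoff5,
                  const int8_t *base, vuint32mf2_t bindex, size_t vl) {
  // Previously spelled vloxseg6ei32_mu(...); now carries the __riscv_ prefix.
  __riscv_vloxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask,
                          maskedoff0, maskedoff1, maskedoff2, maskedoff3,
                          maskedoff4, maskedoff5, base, bindex, vl);
}
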
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c
index f218f304b049..7eb706378c96 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vloxseg6ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vloxseg6ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_tu(
@@ -88,7 +88,7 @@ void test_vloxseg6ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_tu(
@@ -109,7 +109,7 @@ void test_vloxseg6ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_tu(
@@ -130,7 +130,7 @@ void test_vloxseg6ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_tu(
@@ -151,7 +151,7 @@ void test_vloxseg6ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_tu(
@@ -172,7 +172,7 @@ void test_vloxseg6ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_tu(
@@ -193,7 +193,7 @@ void test_vloxseg6ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_tu(
@@ -214,7 +214,7 @@ void test_vloxseg6ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_tu(
@@ -235,7 +235,7 @@ void test_vloxseg6ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_tu(
@@ -256,7 +256,7 @@ void test_vloxseg6ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vloxseg6ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_tu(
@@ -298,7 +298,7 @@ void test_vloxseg6ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_tu(
@@ -319,7 +319,7 @@ void test_vloxseg6ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_tu(
@@ -340,7 +340,7 @@ void test_vloxseg6ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_tu(
@@ -361,7 +361,7 @@ void test_vloxseg6ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_tu(
@@ -382,7 +382,7 @@ void test_vloxseg6ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_tu(
@@ -403,7 +403,7 @@ void test_vloxseg6ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_tu(
@@ -424,7 +424,7 @@ void test_vloxseg6ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_tu(
@@ -445,7 +445,7 @@ void test_vloxseg6ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_tu(
@@ -466,7 +466,7 @@ void test_vloxseg6ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_tu(
@@ -487,7 +487,7 @@ void test_vloxseg6ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_tu(
@@ -508,7 +508,7 @@ void test_vloxseg6ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_tu(
@@ -529,7 +529,7 @@ void test_vloxseg6ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_tu(
@@ -550,7 +550,7 @@ void test_vloxseg6ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_tum(
@@ -571,7 +571,7 @@ void test_vloxseg6ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_tum(
@@ -592,7 +592,7 @@ void test_vloxseg6ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_tum(
@@ -613,7 +613,7 @@ void test_vloxseg6ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vloxseg6ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_tum(
@@ -655,7 +655,7 @@ void test_vloxseg6ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_tum(
@@ -676,7 +676,7 @@ void test_vloxseg6ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_tum(
@@ -697,7 +697,7 @@ void test_vloxseg6ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_tum(
@@ -718,7 +718,7 @@ void test_vloxseg6ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vloxseg6ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_tum(
@@ -760,7 +760,7 @@ void test_vloxseg6ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_tum(
@@ -781,7 +781,7 @@ void test_vloxseg6ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_tum(
@@ -802,7 +802,7 @@ void test_vloxseg6ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_tum(
@@ -823,7 +823,7 @@ void test_vloxseg6ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vloxseg6ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_tum(
@@ -865,7 +865,7 @@ void test_vloxseg6ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_tum(
@@ -886,7 +886,7 @@ void test_vloxseg6ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_tum(
@@ -907,7 +907,7 @@ void test_vloxseg6ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_tum(
@@ -928,7 +928,7 @@ void test_vloxseg6ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vloxseg6ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_tum(
@@ -970,7 +970,7 @@ void test_vloxseg6ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_tum(
@@ -991,7 +991,7 @@ void test_vloxseg6ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_tum(
@@ -1012,7 +1012,7 @@ void test_vloxseg6ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_tum(
@@ -1033,7 +1033,7 @@ void test_vloxseg6ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg6ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_tum(
@@ -1075,7 +1075,7 @@ void test_vloxseg6ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_tum(
@@ -1096,7 +1096,7 @@ void test_vloxseg6ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_tumu(
@@ -1117,7 +1117,7 @@ void test_vloxseg6ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_tumu(
@@ -1138,7 +1138,7 @@ void test_vloxseg6ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vloxseg6ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_tumu(
@@ -1180,7 +1180,7 @@ void test_vloxseg6ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_tumu(
@@ -1201,7 +1201,7 @@ void test_vloxseg6ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_tumu(
@@ -1222,7 +1222,7 @@ void test_vloxseg6ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_tumu(
@@ -1243,7 +1243,7 @@ void test_vloxseg6ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vloxseg6ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_tumu(
@@ -1285,7 +1285,7 @@ void test_vloxseg6ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_tumu(
@@ -1306,7 +1306,7 @@ void test_vloxseg6ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_tumu(
@@ -1327,7 +1327,7 @@ void test_vloxseg6ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_tumu(
@@ -1348,7 +1348,7 @@ void test_vloxseg6ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg6ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_tumu(
@@ -1390,7 +1390,7 @@ void test_vloxseg6ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_tumu(
@@ -1411,7 +1411,7 @@ void test_vloxseg6ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_tumu(
@@ -1432,7 +1432,7 @@ void test_vloxseg6ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_tumu(
@@ -1453,7 +1453,7 @@ void test_vloxseg6ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_tumu(
@@ -1474,7 +1474,7 @@ void test_vloxseg6ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_tumu(
@@ -1495,7 +1495,7 @@ void test_vloxseg6ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_tumu(
@@ -1516,7 +1516,7 @@ void test_vloxseg6ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_tumu(
@@ -1537,7 +1537,7 @@ void test_vloxseg6ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_tumu(
@@ -1558,7 +1558,7 @@ void test_vloxseg6ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg6ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_tumu(
@@ -1600,7 +1600,7 @@ void test_vloxseg6ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_tumu(
@@ -1621,7 +1621,7 @@ void test_vloxseg6ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_tumu(
@@ -1642,7 +1642,7 @@ void test_vloxseg6ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_mu(
@@ -1663,7 +1663,7 @@ void test_vloxseg6ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_mu(
@@ -1684,7 +1684,7 @@ void test_vloxseg6ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_mu(
@@ -1705,7 +1705,7 @@ void test_vloxseg6ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_mu(
@@ -1726,7 +1726,7 @@ void test_vloxseg6ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_mu(
@@ -1747,7 +1747,7 @@ void test_vloxseg6ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_mu(
@@ -1768,7 +1768,7 @@ void test_vloxseg6ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_mu(
@@ -1789,7 +1789,7 @@ void test_vloxseg6ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_mu(
@@ -1810,7 +1810,7 @@ void test_vloxseg6ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_mu(
@@ -1831,7 +1831,7 @@ void test_vloxseg6ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_mu(
@@ -1852,7 +1852,7 @@ void test_vloxseg6ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_mu(
@@ -1873,7 +1873,7 @@ void test_vloxseg6ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_mu(
@@ -1894,7 +1894,7 @@ void test_vloxseg6ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vloxseg6ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_mu(
@@ -1936,7 +1936,7 @@ void test_vloxseg6ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_mu(
@@ -1957,7 +1957,7 @@ void test_vloxseg6ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_mu(
@@ -1978,7 +1978,7 @@ void test_vloxseg6ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_mu(
@@ -1999,7 +1999,7 @@ void test_vloxseg6ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_mu(
@@ -2020,7 +2020,7 @@ void test_vloxseg6ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_mu(
@@ -2041,7 +2041,7 @@ void test_vloxseg6ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_mu(
@@ -2062,7 +2062,7 @@ void test_vloxseg6ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_mu(
@@ -2083,7 +2083,7 @@ void test_vloxseg6ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg6ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_mu(
@@ -2125,7 +2125,7 @@ void test_vloxseg6ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_mu(
@@ -2146,7 +2146,7 @@ void test_vloxseg6ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_mu(
@@ -2167,7 +2167,7 @@ void test_vloxseg6ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_mu(
@@ -2188,6 +2188,6 @@ void test_vloxseg6ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c
index 58dad7032009..a8841042244b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vloxseg6ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vloxseg6ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_tu(
@@ -88,7 +88,7 @@ void test_vloxseg6ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_tu(
@@ -109,7 +109,7 @@ void test_vloxseg6ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_tu(
@@ -130,7 +130,7 @@ void test_vloxseg6ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_tu(
@@ -151,7 +151,7 @@ void test_vloxseg6ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_tu(
@@ -172,7 +172,7 @@ void test_vloxseg6ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_tu(
@@ -193,7 +193,7 @@ void test_vloxseg6ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_tu(
@@ -214,7 +214,7 @@ void test_vloxseg6ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_tu(
@@ -235,7 +235,7 @@ void test_vloxseg6ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_tu(
@@ -256,7 +256,7 @@ void test_vloxseg6ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vloxseg6ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_tu(
@@ -298,7 +298,7 @@ void test_vloxseg6ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_tu(
@@ -319,7 +319,7 @@ void test_vloxseg6ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_tu(
@@ -340,7 +340,7 @@ void test_vloxseg6ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_tu(
@@ -361,7 +361,7 @@ void test_vloxseg6ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_tu(
@@ -382,7 +382,7 @@ void test_vloxseg6ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_tu(
@@ -403,7 +403,7 @@ void test_vloxseg6ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_tu(
@@ -424,7 +424,7 @@ void test_vloxseg6ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_tu(
@@ -445,7 +445,7 @@ void test_vloxseg6ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_tu(
@@ -466,7 +466,7 @@ void test_vloxseg6ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_tu(
@@ -487,7 +487,7 @@ void test_vloxseg6ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_tu(
@@ -508,7 +508,7 @@ void test_vloxseg6ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_tu(
@@ -529,7 +529,7 @@ void test_vloxseg6ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_tu(
@@ -550,7 +550,7 @@ void test_vloxseg6ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_tum(
@@ -571,7 +571,7 @@ void test_vloxseg6ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_tum(
@@ -592,7 +592,7 @@ void test_vloxseg6ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_tum(
@@ -613,7 +613,7 @@ void test_vloxseg6ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vloxseg6ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_tum(
@@ -655,7 +655,7 @@ void test_vloxseg6ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_tum(
@@ -676,7 +676,7 @@ void test_vloxseg6ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_tum(
@@ -697,7 +697,7 @@ void test_vloxseg6ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_tum(
@@ -718,7 +718,7 @@ void test_vloxseg6ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vloxseg6ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_tum(
@@ -760,7 +760,7 @@ void test_vloxseg6ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_tum(
@@ -781,7 +781,7 @@ void test_vloxseg6ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_tum(
@@ -802,7 +802,7 @@ void test_vloxseg6ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_tum(
@@ -823,7 +823,7 @@ void test_vloxseg6ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vloxseg6ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_tum(
@@ -865,7 +865,7 @@ void test_vloxseg6ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_tum(
@@ -886,7 +886,7 @@ void test_vloxseg6ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_tum(
@@ -907,7 +907,7 @@ void test_vloxseg6ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_tum(
@@ -928,7 +928,7 @@ void test_vloxseg6ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vloxseg6ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_tum(
@@ -970,7 +970,7 @@ void test_vloxseg6ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_tum(
@@ -991,7 +991,7 @@ void test_vloxseg6ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_tum(
@@ -1012,7 +1012,7 @@ void test_vloxseg6ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_tum(
@@ -1033,7 +1033,7 @@ void test_vloxseg6ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg6ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_tum(
@@ -1075,7 +1075,7 @@ void test_vloxseg6ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_tum(
@@ -1096,7 +1096,7 @@ void test_vloxseg6ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_tumu(
@@ -1117,7 +1117,7 @@ void test_vloxseg6ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_tumu(
@@ -1138,7 +1138,7 @@ void test_vloxseg6ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vloxseg6ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_tumu(
@@ -1180,7 +1180,7 @@ void test_vloxseg6ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_tumu(
@@ -1201,7 +1201,7 @@ void test_vloxseg6ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_tumu(
@@ -1222,7 +1222,7 @@ void test_vloxseg6ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_tumu(
@@ -1243,7 +1243,7 @@ void test_vloxseg6ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vloxseg6ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_tumu(
@@ -1285,7 +1285,7 @@ void test_vloxseg6ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_tumu(
@@ -1306,7 +1306,7 @@ void test_vloxseg6ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_tumu(
@@ -1327,7 +1327,7 @@ void test_vloxseg6ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_tumu(
@@ -1348,7 +1348,7 @@ void test_vloxseg6ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vloxseg6ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_tumu(
@@ -1390,7 +1390,7 @@ void test_vloxseg6ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_tumu(
@@ -1411,7 +1411,7 @@ void test_vloxseg6ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_tumu(
@@ -1432,7 +1432,7 @@ void test_vloxseg6ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_tumu(
@@ -1453,7 +1453,7 @@ void test_vloxseg6ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_tumu(
@@ -1474,7 +1474,7 @@ void test_vloxseg6ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_tumu(
@@ -1495,7 +1495,7 @@ void test_vloxseg6ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_tumu(
@@ -1516,7 +1516,7 @@ void test_vloxseg6ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_tumu(
@@ -1537,7 +1537,7 @@ void test_vloxseg6ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_tumu(
@@ -1558,7 +1558,7 @@ void test_vloxseg6ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg6ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_tumu(
@@ -1600,7 +1600,7 @@ void test_vloxseg6ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_tumu(
@@ -1621,7 +1621,7 @@ void test_vloxseg6ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_tumu(
@@ -1642,7 +1642,7 @@ void test_vloxseg6ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_mu(
@@ -1663,7 +1663,7 @@ void test_vloxseg6ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_mu(
@@ -1684,7 +1684,7 @@ void test_vloxseg6ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_mu(
@@ -1705,7 +1705,7 @@ void test_vloxseg6ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_mu(
@@ -1726,7 +1726,7 @@ void test_vloxseg6ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_mu(
@@ -1747,7 +1747,7 @@ void test_vloxseg6ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_mu(
@@ -1768,7 +1768,7 @@ void test_vloxseg6ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_mu(
@@ -1789,7 +1789,7 @@ void test_vloxseg6ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_mu(
@@ -1810,7 +1810,7 @@ void test_vloxseg6ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_mu(
@@ -1831,7 +1831,7 @@ void test_vloxseg6ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_mu(
@@ -1852,7 +1852,7 @@ void test_vloxseg6ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_mu(
@@ -1873,7 +1873,7 @@ void test_vloxseg6ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_mu(
@@ -1894,7 +1894,7 @@ void test_vloxseg6ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vloxseg6ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_mu(
@@ -1936,7 +1936,7 @@ void test_vloxseg6ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_mu(
@@ -1957,7 +1957,7 @@ void test_vloxseg6ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_mu(
@@ -1978,7 +1978,7 @@ void test_vloxseg6ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_mu(
@@ -1999,7 +1999,7 @@ void test_vloxseg6ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_mu(
@@ -2020,7 +2020,7 @@ void test_vloxseg6ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_mu(
@@ -2041,7 +2041,7 @@ void test_vloxseg6ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_mu(
@@ -2062,7 +2062,7 @@ void test_vloxseg6ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_mu(
@@ -2083,7 +2083,7 @@ void test_vloxseg6ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg6ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_mu(
@@ -2125,7 +2125,7 @@ void test_vloxseg6ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_mu(
@@ -2146,7 +2146,7 @@ void test_vloxseg6ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_mu(
@@ -2167,7 +2167,7 @@ void test_vloxseg6ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_mu(
@@ -2188,6 +2188,6 @@ void test_vloxseg6ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg6ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vloxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
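The change across these autogenerated tests is purely textual: every call to an overloaded RVV intrinsic gains the `__riscv_` prefix, while the return type, the argument list, and the policy suffix (`_tu`/`_tum`/`_tumu`/`_mu`) stay untouched, so each hunk rewrites exactly one line per test. A minimal sketch of the pattern, assuming the two-field `_tu` overload `vloxseg2ei16_tu` from the same vloxseg family (the wrapper name `example_rename` is illustrative, not part of the test suite):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Tail-undisturbed indexed segment load, seg2, EEW=16 index.
// Pre-patch the overloaded spelling had no prefix; post-patch the
// identical call carries __riscv_ and nothing else changes.
void example_rename(vint32m1_t *v0, vint32m1_t *v1,
                    vint32m1_t maskedoff0, vint32m1_t maskedoff1,
                    const int32_t *base, vuint16mf2_t bindex, size_t vl) {
  // old spelling (pre-patch):
  //   vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
  // new spelling (post-patch):
  __riscv_vloxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}

Because the rename leaves overload resolution and codegen alone, the CHECK-RV64 bodies in these files are unchanged; only the call sites in the test functions differ.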
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c
index f6a198cf025a..6f13eabf43a4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vloxseg7ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vloxseg7ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_tu(
@@ -96,7 +96,7 @@ void test_vloxseg7ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_tu(
@@ -119,7 +119,7 @@ void test_vloxseg7ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_tu(
@@ -142,7 +142,7 @@ void test_vloxseg7ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_tu(
@@ -165,7 +165,7 @@ void test_vloxseg7ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_tu(
@@ -188,7 +188,7 @@ void test_vloxseg7ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_tu(
@@ -211,7 +211,7 @@ void test_vloxseg7ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_tu(
@@ -234,7 +234,7 @@ void test_vloxseg7ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_tu(
@@ -257,7 +257,7 @@ void test_vloxseg7ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_tu(
@@ -280,7 +280,7 @@ void test_vloxseg7ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_tu(
@@ -303,7 +303,7 @@ void test_vloxseg7ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_tu(
@@ -326,7 +326,7 @@ void test_vloxseg7ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_tu(
@@ -349,7 +349,7 @@ void test_vloxseg7ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_tu(
@@ -372,7 +372,7 @@ void test_vloxseg7ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vloxseg7ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_tu(
@@ -418,7 +418,7 @@ void test_vloxseg7ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_tu(
@@ -441,7 +441,7 @@ void test_vloxseg7ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_tu(
@@ -464,7 +464,7 @@ void test_vloxseg7ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_tu(
@@ -487,7 +487,7 @@ void test_vloxseg7ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_tu(
@@ -510,7 +510,7 @@ void test_vloxseg7ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_tu(
@@ -533,7 +533,7 @@ void test_vloxseg7ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_tu(
@@ -556,7 +556,7 @@ void test_vloxseg7ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_tu(
@@ -579,7 +579,7 @@ void test_vloxseg7ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vloxseg7ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_tum(
@@ -625,7 +625,7 @@ void test_vloxseg7ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_tum(
@@ -648,7 +648,7 @@ void test_vloxseg7ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_tum(
@@ -671,7 +671,7 @@ void test_vloxseg7ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_tum(
@@ -694,7 +694,7 @@ void test_vloxseg7ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_tum(
@@ -717,7 +717,7 @@ void test_vloxseg7ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_tum(
@@ -740,7 +740,7 @@ void test_vloxseg7ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_tum(
@@ -763,7 +763,7 @@ void test_vloxseg7ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vloxseg7ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_tum(
@@ -809,7 +809,7 @@ void test_vloxseg7ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_tum(
@@ -832,7 +832,7 @@ void test_vloxseg7ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_tum(
@@ -855,7 +855,7 @@ void test_vloxseg7ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_tum(
@@ -878,7 +878,7 @@ void test_vloxseg7ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vloxseg7ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_tum(
@@ -924,7 +924,7 @@ void test_vloxseg7ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_tum(
@@ -947,7 +947,7 @@ void test_vloxseg7ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_tum(
@@ -970,7 +970,7 @@ void test_vloxseg7ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_tum(
@@ -993,7 +993,7 @@ void test_vloxseg7ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_tum(
@@ -1016,7 +1016,7 @@ void test_vloxseg7ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_tum(
@@ -1039,7 +1039,7 @@ void test_vloxseg7ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_tum(
@@ -1062,7 +1062,7 @@ void test_vloxseg7ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_tum(
@@ -1085,7 +1085,7 @@ void test_vloxseg7ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_tum(
@@ -1108,7 +1108,7 @@ void test_vloxseg7ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_tum(
@@ -1131,7 +1131,7 @@ void test_vloxseg7ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_tum(
@@ -1154,7 +1154,7 @@ void test_vloxseg7ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_tum(
@@ -1177,7 +1177,7 @@ void test_vloxseg7ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_tum(
@@ -1200,7 +1200,7 @@ void test_vloxseg7ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_tumu(
@@ -1223,7 +1223,7 @@ void test_vloxseg7ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_tumu(
@@ -1246,7 +1246,7 @@ void test_vloxseg7ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_tumu(
@@ -1269,7 +1269,7 @@ void test_vloxseg7ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_tumu(
@@ -1292,7 +1292,7 @@ void test_vloxseg7ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_tumu(
@@ -1315,7 +1315,7 @@ void test_vloxseg7ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_tumu(
@@ -1338,7 +1338,7 @@ void test_vloxseg7ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_tumu(
@@ -1361,7 +1361,7 @@ void test_vloxseg7ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_tumu(
@@ -1384,7 +1384,7 @@ void test_vloxseg7ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_tumu(
@@ -1407,7 +1407,7 @@ void test_vloxseg7ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_tumu(
@@ -1430,7 +1430,7 @@ void test_vloxseg7ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_tumu(
@@ -1453,7 +1453,7 @@ void test_vloxseg7ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_tumu(
@@ -1476,7 +1476,7 @@ void test_vloxseg7ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vloxseg7ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_tumu(
@@ -1522,7 +1522,7 @@ void test_vloxseg7ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_tumu(
@@ -1545,7 +1545,7 @@ void test_vloxseg7ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_tumu(
@@ -1568,7 +1568,7 @@ void test_vloxseg7ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_tumu(
@@ -1591,7 +1591,7 @@ void test_vloxseg7ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_tumu(
@@ -1614,7 +1614,7 @@ void test_vloxseg7ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_tumu(
@@ -1637,7 +1637,7 @@ void test_vloxseg7ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_tumu(
@@ -1660,7 +1660,7 @@ void test_vloxseg7ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_tumu(
@@ -1683,7 +1683,7 @@ void test_vloxseg7ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_tumu(
@@ -1706,7 +1706,7 @@ void test_vloxseg7ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_tumu(
@@ -1729,7 +1729,7 @@ void test_vloxseg7ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_tumu(
@@ -1752,7 +1752,7 @@ void test_vloxseg7ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_tumu(
@@ -1775,7 +1775,7 @@ void test_vloxseg7ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_tumu(
@@ -1798,7 +1798,7 @@ void test_vloxseg7ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_mu(
@@ -1821,7 +1821,7 @@ void test_vloxseg7ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_mu(
@@ -1844,7 +1844,7 @@ void test_vloxseg7ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_mu(
@@ -1867,7 +1867,7 @@ void test_vloxseg7ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_mu(
@@ -1890,7 +1890,7 @@ void test_vloxseg7ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_mu(
@@ -1913,7 +1913,7 @@ void test_vloxseg7ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_mu(
@@ -1936,7 +1936,7 @@ void test_vloxseg7ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vloxseg7ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_mu(
@@ -1982,7 +1982,7 @@ void test_vloxseg7ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_mu(
@@ -2005,7 +2005,7 @@ void test_vloxseg7ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_mu(
@@ -2028,7 +2028,7 @@ void test_vloxseg7ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_mu(
@@ -2051,7 +2051,7 @@ void test_vloxseg7ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_mu(
@@ -2074,7 +2074,7 @@ void test_vloxseg7ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_mu(
@@ -2097,7 +2097,7 @@ void test_vloxseg7ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_mu(
@@ -2120,7 +2120,7 @@ void test_vloxseg7ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_mu(
@@ -2143,7 +2143,7 @@ void test_vloxseg7ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_mu(
@@ -2166,7 +2166,7 @@ void test_vloxseg7ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_mu(
@@ -2189,7 +2189,7 @@ void test_vloxseg7ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_mu(
@@ -2212,7 +2212,7 @@ void test_vloxseg7ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_mu(
@@ -2235,7 +2235,7 @@ void test_vloxseg7ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_mu(
@@ -2258,7 +2258,7 @@ void test_vloxseg7ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_mu(
@@ -2281,7 +2281,7 @@ void test_vloxseg7ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vloxseg7ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_mu(
@@ -2327,7 +2327,7 @@ void test_vloxseg7ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_mu(
@@ -2350,7 +2350,7 @@ void test_vloxseg7ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_mu(
@@ -2373,7 +2373,7 @@ void test_vloxseg7ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_mu(
@@ -2396,6 +2396,6 @@ void test_vloxseg7ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c
index 92e1fb0b24bf..0897b45f84bc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vloxseg7ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vloxseg7ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_tu(
@@ -96,7 +96,7 @@ void test_vloxseg7ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_tu(
@@ -119,7 +119,7 @@ void test_vloxseg7ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_tu(
@@ -142,7 +142,7 @@ void test_vloxseg7ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_tu(
@@ -165,7 +165,7 @@ void test_vloxseg7ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_tu(
@@ -188,7 +188,7 @@ void test_vloxseg7ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_tu(
@@ -211,7 +211,7 @@ void test_vloxseg7ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_tu(
@@ -234,7 +234,7 @@ void test_vloxseg7ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_tu(
@@ -257,7 +257,7 @@ void test_vloxseg7ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_tu(
@@ -280,7 +280,7 @@ void test_vloxseg7ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_tu(
@@ -303,7 +303,7 @@ void test_vloxseg7ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_tu(
@@ -326,7 +326,7 @@ void test_vloxseg7ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_tu(
@@ -349,7 +349,7 @@ void test_vloxseg7ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_tu(
@@ -372,7 +372,7 @@ void test_vloxseg7ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vloxseg7ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_tu(
@@ -418,7 +418,7 @@ void test_vloxseg7ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_tu(
@@ -441,7 +441,7 @@ void test_vloxseg7ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_tu(
@@ -464,7 +464,7 @@ void test_vloxseg7ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_tu(
@@ -487,7 +487,7 @@ void test_vloxseg7ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_tu(
@@ -510,7 +510,7 @@ void test_vloxseg7ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_tu(
@@ -533,7 +533,7 @@ void test_vloxseg7ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_tu(
@@ -556,7 +556,7 @@ void test_vloxseg7ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_tu(
@@ -579,7 +579,7 @@ void test_vloxseg7ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vloxseg7ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_tum(
@@ -625,7 +625,7 @@ void test_vloxseg7ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_tum(
@@ -648,7 +648,7 @@ void test_vloxseg7ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_tum(
@@ -671,7 +671,7 @@ void test_vloxseg7ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_tum(
@@ -694,7 +694,7 @@ void test_vloxseg7ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_tum(
@@ -717,7 +717,7 @@ void test_vloxseg7ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_tum(
@@ -740,7 +740,7 @@ void test_vloxseg7ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_tum(
@@ -763,7 +763,7 @@ void test_vloxseg7ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vloxseg7ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_tum(
@@ -809,7 +809,7 @@ void test_vloxseg7ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_tum(
@@ -832,7 +832,7 @@ void test_vloxseg7ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_tum(
@@ -855,7 +855,7 @@ void test_vloxseg7ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_tum(
@@ -878,7 +878,7 @@ void test_vloxseg7ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vloxseg7ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_tum(
@@ -924,7 +924,7 @@ void test_vloxseg7ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_tum(
@@ -947,7 +947,7 @@ void test_vloxseg7ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_tum(
@@ -970,7 +970,7 @@ void test_vloxseg7ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_tum(
@@ -993,7 +993,7 @@ void test_vloxseg7ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_tum(
@@ -1016,7 +1016,7 @@ void test_vloxseg7ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_tum(
@@ -1039,7 +1039,7 @@ void test_vloxseg7ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_tum(
@@ -1062,7 +1062,7 @@ void test_vloxseg7ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_tum(
@@ -1085,7 +1085,7 @@ void test_vloxseg7ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_tum(
@@ -1108,7 +1108,7 @@ void test_vloxseg7ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_tum(
@@ -1131,7 +1131,7 @@ void test_vloxseg7ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_tum(
@@ -1154,7 +1154,7 @@ void test_vloxseg7ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_tum(
@@ -1177,7 +1177,7 @@ void test_vloxseg7ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_tum(
@@ -1200,7 +1200,7 @@ void test_vloxseg7ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_tumu(
@@ -1223,7 +1223,7 @@ void test_vloxseg7ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_tumu(
@@ -1246,7 +1246,7 @@ void test_vloxseg7ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_tumu(
@@ -1269,7 +1269,7 @@ void test_vloxseg7ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_tumu(
@@ -1292,7 +1292,7 @@ void test_vloxseg7ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_tumu(
@@ -1315,7 +1315,7 @@ void test_vloxseg7ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_tumu(
@@ -1338,7 +1338,7 @@ void test_vloxseg7ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_tumu(
@@ -1361,7 +1361,7 @@ void test_vloxseg7ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_tumu(
@@ -1384,7 +1384,7 @@ void test_vloxseg7ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_tumu(
@@ -1407,7 +1407,7 @@ void test_vloxseg7ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_tumu(
@@ -1430,7 +1430,7 @@ void test_vloxseg7ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_tumu(
@@ -1453,7 +1453,7 @@ void test_vloxseg7ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_tumu(
@@ -1476,7 +1476,7 @@ void test_vloxseg7ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vloxseg7ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_tumu(
@@ -1522,7 +1522,7 @@ void test_vloxseg7ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_tumu(
@@ -1545,7 +1545,7 @@ void test_vloxseg7ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_tumu(
@@ -1568,7 +1568,7 @@ void test_vloxseg7ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_tumu(
@@ -1591,7 +1591,7 @@ void test_vloxseg7ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_tumu(
@@ -1614,7 +1614,7 @@ void test_vloxseg7ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_tumu(
@@ -1637,7 +1637,7 @@ void test_vloxseg7ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_tumu(
@@ -1660,7 +1660,7 @@ void test_vloxseg7ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_tumu(
@@ -1683,7 +1683,7 @@ void test_vloxseg7ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_tumu(
@@ -1706,7 +1706,7 @@ void test_vloxseg7ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_tumu(
@@ -1729,7 +1729,7 @@ void test_vloxseg7ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_tumu(
@@ -1752,7 +1752,7 @@ void test_vloxseg7ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_tumu(
@@ -1775,7 +1775,7 @@ void test_vloxseg7ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_tumu(
@@ -1798,7 +1798,7 @@ void test_vloxseg7ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_mu(
@@ -1821,7 +1821,7 @@ void test_vloxseg7ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_mu(
@@ -1844,7 +1844,7 @@ void test_vloxseg7ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_mu(
@@ -1867,7 +1867,7 @@ void test_vloxseg7ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_mu(
@@ -1890,7 +1890,7 @@ void test_vloxseg7ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_mu(
@@ -1913,7 +1913,7 @@ void test_vloxseg7ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_mu(
@@ -1936,7 +1936,7 @@ void test_vloxseg7ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vloxseg7ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_mu(
@@ -1982,7 +1982,7 @@ void test_vloxseg7ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_mu(
@@ -2005,7 +2005,7 @@ void test_vloxseg7ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_mu(
@@ -2028,7 +2028,7 @@ void test_vloxseg7ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_mu(
@@ -2051,7 +2051,7 @@ void test_vloxseg7ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_mu(
@@ -2074,7 +2074,7 @@ void test_vloxseg7ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_mu(
@@ -2097,7 +2097,7 @@ void test_vloxseg7ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_mu(
@@ -2120,7 +2120,7 @@ void test_vloxseg7ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_mu(
@@ -2143,7 +2143,7 @@ void test_vloxseg7ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_mu(
@@ -2166,7 +2166,7 @@ void test_vloxseg7ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_mu(
@@ -2189,7 +2189,7 @@ void test_vloxseg7ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_mu(
@@ -2212,7 +2212,7 @@ void test_vloxseg7ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_mu(
@@ -2235,7 +2235,7 @@ void test_vloxseg7ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_mu(
@@ -2258,7 +2258,7 @@ void test_vloxseg7ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_mu(
@@ -2281,7 +2281,7 @@ void test_vloxseg7ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vloxseg7ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_mu(
@@ -2327,7 +2327,7 @@ void test_vloxseg7ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_mu(
@@ -2350,7 +2350,7 @@ void test_vloxseg7ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_mu(
@@ -2373,7 +2373,7 @@ void test_vloxseg7ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_mu(
@@ -2396,6 +2396,6 @@ void test_vloxseg7ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c
index 53407ab7c238..1c9d40ae3b6e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vloxseg7ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vloxseg7ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_tu(
@@ -96,7 +96,7 @@ void test_vloxseg7ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_tu(
@@ -119,7 +119,7 @@ void test_vloxseg7ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_tu(
@@ -142,7 +142,7 @@ void test_vloxseg7ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_tu(
@@ -165,7 +165,7 @@ void test_vloxseg7ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_tu(
@@ -188,7 +188,7 @@ void test_vloxseg7ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_tu(
@@ -211,7 +211,7 @@ void test_vloxseg7ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_tu(
@@ -234,7 +234,7 @@ void test_vloxseg7ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_tu(
@@ -257,7 +257,7 @@ void test_vloxseg7ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_tu(
@@ -280,7 +280,7 @@ void test_vloxseg7ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_tu(
@@ -303,7 +303,7 @@ void test_vloxseg7ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_tu(
@@ -326,7 +326,7 @@ void test_vloxseg7ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_tu(
@@ -349,7 +349,7 @@ void test_vloxseg7ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_tu(
@@ -372,7 +372,7 @@ void test_vloxseg7ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vloxseg7ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_tu(
@@ -418,7 +418,7 @@ void test_vloxseg7ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_tu(
@@ -441,7 +441,7 @@ void test_vloxseg7ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_tu(
@@ -464,7 +464,7 @@ void test_vloxseg7ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_tu(
@@ -487,7 +487,7 @@ void test_vloxseg7ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_tu(
@@ -510,7 +510,7 @@ void test_vloxseg7ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_tu(
@@ -533,7 +533,7 @@ void test_vloxseg7ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_tu(
@@ -556,7 +556,7 @@ void test_vloxseg7ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_tu(
@@ -579,7 +579,7 @@ void test_vloxseg7ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vloxseg7ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_tum(
@@ -625,7 +625,7 @@ void test_vloxseg7ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_tum(
@@ -648,7 +648,7 @@ void test_vloxseg7ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_tum(
@@ -671,7 +671,7 @@ void test_vloxseg7ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_tum(
@@ -694,7 +694,7 @@ void test_vloxseg7ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_tum(
@@ -717,7 +717,7 @@ void test_vloxseg7ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_tum(
@@ -740,7 +740,7 @@ void test_vloxseg7ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_tum(
@@ -763,7 +763,7 @@ void test_vloxseg7ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vloxseg7ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_tum(
@@ -809,7 +809,7 @@ void test_vloxseg7ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_tum(
@@ -832,7 +832,7 @@ void test_vloxseg7ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_tum(
@@ -855,7 +855,7 @@ void test_vloxseg7ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_tum(
@@ -878,7 +878,7 @@ void test_vloxseg7ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vloxseg7ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_tum(
@@ -924,7 +924,7 @@ void test_vloxseg7ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_tum(
@@ -947,7 +947,7 @@ void test_vloxseg7ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_tum(
@@ -970,7 +970,7 @@ void test_vloxseg7ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_tum(
@@ -993,7 +993,7 @@ void test_vloxseg7ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_tum(
@@ -1016,7 +1016,7 @@ void test_vloxseg7ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_tum(
@@ -1039,7 +1039,7 @@ void test_vloxseg7ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_tum(
@@ -1062,7 +1062,7 @@ void test_vloxseg7ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_tum(
@@ -1085,7 +1085,7 @@ void test_vloxseg7ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_tum(
@@ -1108,7 +1108,7 @@ void test_vloxseg7ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_tum(
@@ -1131,7 +1131,7 @@ void test_vloxseg7ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_tum(
@@ -1154,7 +1154,7 @@ void test_vloxseg7ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_tum(
@@ -1177,7 +1177,7 @@ void test_vloxseg7ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_tum(
@@ -1200,7 +1200,7 @@ void test_vloxseg7ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_tumu(
@@ -1223,7 +1223,7 @@ void test_vloxseg7ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_tumu(
@@ -1246,7 +1246,7 @@ void test_vloxseg7ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_tumu(
@@ -1269,7 +1269,7 @@ void test_vloxseg7ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_tumu(
@@ -1292,7 +1292,7 @@ void test_vloxseg7ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_tumu(
@@ -1315,7 +1315,7 @@ void test_vloxseg7ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_tumu(
@@ -1338,7 +1338,7 @@ void test_vloxseg7ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_tumu(
@@ -1361,7 +1361,7 @@ void test_vloxseg7ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_tumu(
@@ -1384,7 +1384,7 @@ void test_vloxseg7ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_tumu(
@@ -1407,7 +1407,7 @@ void test_vloxseg7ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_tumu(
@@ -1430,7 +1430,7 @@ void test_vloxseg7ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_tumu(
@@ -1453,7 +1453,7 @@ void test_vloxseg7ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_tumu(
@@ -1476,7 +1476,7 @@ void test_vloxseg7ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vloxseg7ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_tumu(
@@ -1522,7 +1522,7 @@ void test_vloxseg7ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_tumu(
@@ -1545,7 +1545,7 @@ void test_vloxseg7ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_tumu(
@@ -1568,7 +1568,7 @@ void test_vloxseg7ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_tumu(
@@ -1591,7 +1591,7 @@ void test_vloxseg7ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_tumu(
@@ -1614,7 +1614,7 @@ void test_vloxseg7ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_tumu(
@@ -1637,7 +1637,7 @@ void test_vloxseg7ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_tumu(
@@ -1660,7 +1660,7 @@ void test_vloxseg7ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_tumu(
@@ -1683,7 +1683,7 @@ void test_vloxseg7ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_tumu(
@@ -1706,7 +1706,7 @@ void test_vloxseg7ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_tumu(
@@ -1729,7 +1729,7 @@ void test_vloxseg7ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_tumu(
@@ -1752,7 +1752,7 @@ void test_vloxseg7ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_tumu(
@@ -1775,7 +1775,7 @@ void test_vloxseg7ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_tumu(
@@ -1798,7 +1798,7 @@ void test_vloxseg7ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_mu(
@@ -1821,7 +1821,7 @@ void test_vloxseg7ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_mu(
@@ -1844,7 +1844,7 @@ void test_vloxseg7ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_mu(
@@ -1867,7 +1867,7 @@ void test_vloxseg7ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_mu(
@@ -1890,7 +1890,7 @@ void test_vloxseg7ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_mu(
@@ -1913,7 +1913,7 @@ void test_vloxseg7ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_mu(
@@ -1936,7 +1936,7 @@ void test_vloxseg7ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vloxseg7ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_mu(
@@ -1982,7 +1982,7 @@ void test_vloxseg7ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_mu(
@@ -2005,7 +2005,7 @@ void test_vloxseg7ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_mu(
@@ -2028,7 +2028,7 @@ void test_vloxseg7ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_mu(
@@ -2051,7 +2051,7 @@ void test_vloxseg7ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_mu(
@@ -2074,7 +2074,7 @@ void test_vloxseg7ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_mu(
@@ -2097,7 +2097,7 @@ void test_vloxseg7ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_mu(
@@ -2120,7 +2120,7 @@ void test_vloxseg7ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_mu(
@@ -2143,7 +2143,7 @@ void test_vloxseg7ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_mu(
@@ -2166,7 +2166,7 @@ void test_vloxseg7ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_mu(
@@ -2189,7 +2189,7 @@ void test_vloxseg7ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_mu(
@@ -2212,7 +2212,7 @@ void test_vloxseg7ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_mu(
@@ -2235,7 +2235,7 @@ void test_vloxseg7ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_mu(
@@ -2258,7 +2258,7 @@ void test_vloxseg7ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_mu(
@@ -2281,7 +2281,7 @@ void test_vloxseg7ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vloxseg7ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_mu(
@@ -2327,7 +2327,7 @@ void test_vloxseg7ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_mu(
@@ -2350,7 +2350,7 @@ void test_vloxseg7ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_mu(
@@ -2373,7 +2373,7 @@ void test_vloxseg7ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_mu(
@@ -2396,6 +2396,6 @@ void test_vloxseg7ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c
index d18c02fae90d..523630205722 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vloxseg7ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vloxseg7ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_tu(
@@ -96,7 +96,7 @@ void test_vloxseg7ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_tu(
@@ -119,7 +119,7 @@ void test_vloxseg7ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_tu(
@@ -142,7 +142,7 @@ void test_vloxseg7ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_tu(
@@ -165,7 +165,7 @@ void test_vloxseg7ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_tu(
@@ -188,7 +188,7 @@ void test_vloxseg7ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_tu(
@@ -211,7 +211,7 @@ void test_vloxseg7ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_tu(
@@ -234,7 +234,7 @@ void test_vloxseg7ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_tu(
@@ -257,7 +257,7 @@ void test_vloxseg7ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_tu(
@@ -280,7 +280,7 @@ void test_vloxseg7ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_tu(
@@ -303,7 +303,7 @@ void test_vloxseg7ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_tu(
@@ -326,7 +326,7 @@ void test_vloxseg7ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_tu(
@@ -349,7 +349,7 @@ void test_vloxseg7ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_tu(
@@ -372,7 +372,7 @@ void test_vloxseg7ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vloxseg7ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_tu(
@@ -418,7 +418,7 @@ void test_vloxseg7ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_tu(
@@ -441,7 +441,7 @@ void test_vloxseg7ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_tu(
@@ -464,7 +464,7 @@ void test_vloxseg7ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_tu(
@@ -487,7 +487,7 @@ void test_vloxseg7ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_tu(
@@ -510,7 +510,7 @@ void test_vloxseg7ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_tu(
@@ -533,7 +533,7 @@ void test_vloxseg7ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_tu(
@@ -556,7 +556,7 @@ void test_vloxseg7ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_tu(
@@ -579,7 +579,7 @@ void test_vloxseg7ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vloxseg7ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_tum(
@@ -625,7 +625,7 @@ void test_vloxseg7ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_tum(
@@ -648,7 +648,7 @@ void test_vloxseg7ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_tum(
@@ -671,7 +671,7 @@ void test_vloxseg7ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_tum(
@@ -694,7 +694,7 @@ void test_vloxseg7ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_tum(
@@ -717,7 +717,7 @@ void test_vloxseg7ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_tum(
@@ -740,7 +740,7 @@ void test_vloxseg7ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_tum(
@@ -763,7 +763,7 @@ void test_vloxseg7ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vloxseg7ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_tum(
@@ -809,7 +809,7 @@ void test_vloxseg7ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_tum(
@@ -832,7 +832,7 @@ void test_vloxseg7ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_tum(
@@ -855,7 +855,7 @@ void test_vloxseg7ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_tum(
@@ -878,7 +878,7 @@ void test_vloxseg7ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vloxseg7ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_tum(
@@ -924,7 +924,7 @@ void test_vloxseg7ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_tum(
@@ -947,7 +947,7 @@ void test_vloxseg7ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_tum(
@@ -970,7 +970,7 @@ void test_vloxseg7ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_tum(
@@ -993,7 +993,7 @@ void test_vloxseg7ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_tum(
@@ -1016,7 +1016,7 @@ void test_vloxseg7ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_tum(
@@ -1039,7 +1039,7 @@ void test_vloxseg7ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_tum(
@@ -1062,7 +1062,7 @@ void test_vloxseg7ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_tum(
@@ -1085,7 +1085,7 @@ void test_vloxseg7ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_tum(
@@ -1108,7 +1108,7 @@ void test_vloxseg7ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_tum(
@@ -1131,7 +1131,7 @@ void test_vloxseg7ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_tum(
@@ -1154,7 +1154,7 @@ void test_vloxseg7ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_tum(
@@ -1177,7 +1177,7 @@ void test_vloxseg7ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_tum(
@@ -1200,7 +1200,7 @@ void test_vloxseg7ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_tumu(
@@ -1223,7 +1223,7 @@ void test_vloxseg7ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_tumu(
@@ -1246,7 +1246,7 @@ void test_vloxseg7ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_tumu(
@@ -1269,7 +1269,7 @@ void test_vloxseg7ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_tumu(
@@ -1292,7 +1292,7 @@ void test_vloxseg7ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_tumu(
@@ -1315,7 +1315,7 @@ void test_vloxseg7ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_tumu(
@@ -1338,7 +1338,7 @@ void test_vloxseg7ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_tumu(
@@ -1361,7 +1361,7 @@ void test_vloxseg7ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_tumu(
@@ -1384,7 +1384,7 @@ void test_vloxseg7ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_tumu(
@@ -1407,7 +1407,7 @@ void test_vloxseg7ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_tumu(
@@ -1430,7 +1430,7 @@ void test_vloxseg7ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_tumu(
@@ -1453,7 +1453,7 @@ void test_vloxseg7ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_tumu(
@@ -1476,7 +1476,7 @@ void test_vloxseg7ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vloxseg7ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_tumu(
@@ -1522,7 +1522,7 @@ void test_vloxseg7ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_tumu(
@@ -1545,7 +1545,7 @@ void test_vloxseg7ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_tumu(
@@ -1568,7 +1568,7 @@ void test_vloxseg7ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_tumu(
@@ -1591,7 +1591,7 @@ void test_vloxseg7ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_tumu(
@@ -1614,7 +1614,7 @@ void test_vloxseg7ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_tumu(
@@ -1637,7 +1637,7 @@ void test_vloxseg7ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_tumu(
@@ -1660,7 +1660,7 @@ void test_vloxseg7ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_tumu(
@@ -1683,7 +1683,7 @@ void test_vloxseg7ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_tumu(
@@ -1706,7 +1706,7 @@ void test_vloxseg7ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_tumu(
@@ -1729,7 +1729,7 @@ void test_vloxseg7ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_tumu(
@@ -1752,7 +1752,7 @@ void test_vloxseg7ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_tumu(
@@ -1775,7 +1775,7 @@ void test_vloxseg7ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_tumu(
@@ -1798,7 +1798,7 @@ void test_vloxseg7ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_mu(
@@ -1821,7 +1821,7 @@ void test_vloxseg7ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_mu(
@@ -1844,7 +1844,7 @@ void test_vloxseg7ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_mu(
@@ -1867,7 +1867,7 @@ void test_vloxseg7ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_mu(
@@ -1890,7 +1890,7 @@ void test_vloxseg7ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_mu(
@@ -1913,7 +1913,7 @@ void test_vloxseg7ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_mu(
@@ -1936,7 +1936,7 @@ void test_vloxseg7ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vloxseg7ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_mu(
@@ -1982,7 +1982,7 @@ void test_vloxseg7ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_mu(
@@ -2005,7 +2005,7 @@ void test_vloxseg7ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_mu(
@@ -2028,7 +2028,7 @@ void test_vloxseg7ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_mu(
@@ -2051,7 +2051,7 @@ void test_vloxseg7ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_mu(
@@ -2074,7 +2074,7 @@ void test_vloxseg7ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_mu(
@@ -2097,7 +2097,7 @@ void test_vloxseg7ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_mu(
@@ -2120,7 +2120,7 @@ void test_vloxseg7ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_mu(
@@ -2143,7 +2143,7 @@ void test_vloxseg7ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_mu(
@@ -2166,7 +2166,7 @@ void test_vloxseg7ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_mu(
@@ -2189,7 +2189,7 @@ void test_vloxseg7ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_mu(
@@ -2212,7 +2212,7 @@ void test_vloxseg7ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_mu(
@@ -2235,7 +2235,7 @@ void test_vloxseg7ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_mu(
@@ -2258,7 +2258,7 @@ void test_vloxseg7ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_mu(
@@ -2281,7 +2281,7 @@ void test_vloxseg7ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vloxseg7ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_mu(
@@ -2327,7 +2327,7 @@ void test_vloxseg7ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_mu(
@@ -2350,7 +2350,7 @@ void test_vloxseg7ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_mu(
@@ -2373,7 +2373,7 @@ void test_vloxseg7ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_mu(
@@ -2396,6 +2396,6 @@ void test_vloxseg7ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg7ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vloxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
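For reference, the rename in the hunks above is purely mechanical: the overloaded spelling gains the `__riscv_` prefix while the operand list (seven output-field pointers, the mask for the `_tum`/`_tumu`/`_mu` policies, seven masked-off sources, then base pointer, index vector, and `vl`) is unchanged. Below is a minimal caller sketch mirroring the `i16m1` `_tum` test above; the wrapper name `load7_i16m1` is hypothetical, and the sketch assumes a RISC-V target with the V extension and a compiler that ships this pre-tuple, prefixed spelling (e.g. Clang with this patch applied).

#include <riscv_vector.h>

// Hypothetical wrapper around the renamed overloaded intrinsic. The
// argument order matches the autogenerated tests: seven output fields,
// the mask, seven masked-off sources, then base/index/vl. The intrinsic
// returns void and writes the loaded segments through v0..v6.
void load7_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
                 vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5,
                 vint16m1_t *v6, vbool16_t mask,
                 vint16m1_t m0, vint16m1_t m1, vint16m1_t m2, vint16m1_t m3,
                 vint16m1_t m4, vint16m1_t m5, vint16m1_t m6,
                 const int16_t *base, vuint8mf2_t bindex, size_t vl) {
  // Before this patch the call was spelled vloxseg7ei8_tum(...); only the
  // prefix changes. Note that the unmasked _tu form (see the vloxseg8ei16
  // tests below) drops the mask operand entirely.
  __riscv_vloxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask,
                          m0, m1, m2, m3, m4, m5, m6, base, bindex, vl);
}
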
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c
index ed73060f889c..c6cae2d6726f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vloxseg8ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vloxseg8ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_tu(
@@ -104,7 +104,7 @@ void test_vloxseg8ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_tu(
@@ -129,7 +129,7 @@ void test_vloxseg8ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_tu(
@@ -154,7 +154,7 @@ void test_vloxseg8ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_tu(
@@ -179,7 +179,7 @@ void test_vloxseg8ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_tu(
@@ -204,7 +204,7 @@ void test_vloxseg8ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_tu(
@@ -229,7 +229,7 @@ void test_vloxseg8ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_tu(
@@ -254,7 +254,7 @@ void test_vloxseg8ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_tu(
@@ -279,7 +279,7 @@ void test_vloxseg8ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_tu(
@@ -304,7 +304,7 @@ void test_vloxseg8ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_tu(
@@ -329,7 +329,7 @@ void test_vloxseg8ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_tu(
@@ -354,7 +354,7 @@ void test_vloxseg8ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_tu(
@@ -379,7 +379,7 @@ void test_vloxseg8ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_tu(
@@ -404,7 +404,7 @@ void test_vloxseg8ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_tu(
@@ -429,7 +429,7 @@ void test_vloxseg8ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_tu(
@@ -454,7 +454,7 @@ void test_vloxseg8ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_tu(
@@ -479,7 +479,7 @@ void test_vloxseg8ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_tu(
@@ -504,7 +504,7 @@ void test_vloxseg8ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_tu(
@@ -529,7 +529,7 @@ void test_vloxseg8ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_tu(
@@ -554,7 +554,7 @@ void test_vloxseg8ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_tu(
@@ -579,7 +579,7 @@ void test_vloxseg8ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_tu(
@@ -604,7 +604,7 @@ void test_vloxseg8ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_tu(
@@ -629,7 +629,7 @@ void test_vloxseg8ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_tu(
@@ -654,7 +654,7 @@ void test_vloxseg8ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_tum(
@@ -679,7 +679,7 @@ void test_vloxseg8ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_tum(
@@ -704,7 +704,7 @@ void test_vloxseg8ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_tum(
@@ -729,7 +729,7 @@ void test_vloxseg8ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_tum(
@@ -754,7 +754,7 @@ void test_vloxseg8ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_tum(
@@ -779,7 +779,7 @@ void test_vloxseg8ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_tum(
@@ -804,7 +804,7 @@ void test_vloxseg8ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_tum(
@@ -829,7 +829,7 @@ void test_vloxseg8ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_tum(
@@ -854,7 +854,7 @@ void test_vloxseg8ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_tum(
@@ -879,7 +879,7 @@ void test_vloxseg8ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_tum(
@@ -904,7 +904,7 @@ void test_vloxseg8ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_tum(
@@ -929,7 +929,7 @@ void test_vloxseg8ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_tum(
@@ -954,7 +954,7 @@ void test_vloxseg8ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_tum(
@@ -979,7 +979,7 @@ void test_vloxseg8ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_tum(
@@ -1004,7 +1004,7 @@ void test_vloxseg8ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_tum(
@@ -1029,7 +1029,7 @@ void test_vloxseg8ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg8ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_tum(
@@ -1079,7 +1079,7 @@ void test_vloxseg8ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_tum(
@@ -1104,7 +1104,7 @@ void test_vloxseg8ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_tum(
@@ -1129,7 +1129,7 @@ void test_vloxseg8ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_tum(
@@ -1154,7 +1154,7 @@ void test_vloxseg8ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_tum(
@@ -1179,7 +1179,7 @@ void test_vloxseg8ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_tum(
@@ -1204,7 +1204,7 @@ void test_vloxseg8ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_tum(
@@ -1229,7 +1229,7 @@ void test_vloxseg8ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_tum(
@@ -1254,7 +1254,7 @@ void test_vloxseg8ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_tum(
@@ -1279,7 +1279,7 @@ void test_vloxseg8ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_tum(
@@ -1304,7 +1304,7 @@ void test_vloxseg8ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_tumu(
@@ -1329,7 +1329,7 @@ void test_vloxseg8ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vloxseg8ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_tumu(
@@ -1379,7 +1379,7 @@ void test_vloxseg8ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_tumu(
@@ -1404,7 +1404,7 @@ void test_vloxseg8ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg8ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_tumu(
@@ -1454,7 +1454,7 @@ void test_vloxseg8ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_tumu(
@@ -1479,7 +1479,7 @@ void test_vloxseg8ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_tumu(
@@ -1504,7 +1504,7 @@ void test_vloxseg8ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_tumu(
@@ -1529,7 +1529,7 @@ void test_vloxseg8ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_tumu(
@@ -1554,7 +1554,7 @@ void test_vloxseg8ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg8ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_tumu(
@@ -1604,7 +1604,7 @@ void test_vloxseg8ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vloxseg8ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_tumu(
@@ -1654,7 +1654,7 @@ void test_vloxseg8ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_tumu(
@@ -1679,7 +1679,7 @@ void test_vloxseg8ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_tumu(
@@ -1704,7 +1704,7 @@ void test_vloxseg8ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_tumu(
@@ -1729,7 +1729,7 @@ void test_vloxseg8ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_tumu(
@@ -1754,7 +1754,7 @@ void test_vloxseg8ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_tumu(
@@ -1779,7 +1779,7 @@ void test_vloxseg8ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_tumu(
@@ -1804,7 +1804,7 @@ void test_vloxseg8ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_tumu(
@@ -1829,7 +1829,7 @@ void test_vloxseg8ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_tumu(
@@ -1854,7 +1854,7 @@ void test_vloxseg8ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_tumu(
@@ -1879,7 +1879,7 @@ void test_vloxseg8ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_tumu(
@@ -1904,7 +1904,7 @@ void test_vloxseg8ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_tumu(
@@ -1929,7 +1929,7 @@ void test_vloxseg8ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_tumu(
@@ -1954,7 +1954,7 @@ void test_vloxseg8ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_mu(
@@ -1979,7 +1979,7 @@ void test_vloxseg8ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_mu(
@@ -2004,7 +2004,7 @@ void test_vloxseg8ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_mu(
@@ -2029,7 +2029,7 @@ void test_vloxseg8ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_mu(
@@ -2054,7 +2054,7 @@ void test_vloxseg8ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_mu(
@@ -2079,7 +2079,7 @@ void test_vloxseg8ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg8ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_mu(
@@ -2129,7 +2129,7 @@ void test_vloxseg8ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_mu(
@@ -2154,7 +2154,7 @@ void test_vloxseg8ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_mu(
@@ -2179,7 +2179,7 @@ void test_vloxseg8ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_mu(
@@ -2204,7 +2204,7 @@ void test_vloxseg8ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_mu(
@@ -2229,7 +2229,7 @@ void test_vloxseg8ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_mu(
@@ -2254,7 +2254,7 @@ void test_vloxseg8ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vloxseg8ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vloxseg8ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_mu(
@@ -2329,7 +2329,7 @@ void test_vloxseg8ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_mu(
@@ -2354,7 +2354,7 @@ void test_vloxseg8ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_mu(
@@ -2379,7 +2379,7 @@ void test_vloxseg8ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_mu(
@@ -2404,7 +2404,7 @@ void test_vloxseg8ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_mu(
@@ -2429,7 +2429,7 @@ void test_vloxseg8ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_mu(
@@ -2454,7 +2454,7 @@ void test_vloxseg8ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_mu(
@@ -2479,7 +2479,7 @@ void test_vloxseg8ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_mu(
@@ -2504,7 +2504,7 @@ void test_vloxseg8ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_mu(
@@ -2529,7 +2529,7 @@ void test_vloxseg8ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_mu(
@@ -2554,7 +2554,7 @@ void test_vloxseg8ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_mu(
@@ -2579,7 +2579,7 @@ void test_vloxseg8ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_mu(
@@ -2604,6 +2604,6 @@ void test_vloxseg8ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c
index 2712547c2a4e..04fc48a07f5f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vloxseg8ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vloxseg8ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_tu(
@@ -104,7 +104,7 @@ void test_vloxseg8ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_tu(
@@ -129,7 +129,7 @@ void test_vloxseg8ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_tu(
@@ -154,7 +154,7 @@ void test_vloxseg8ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_tu(
@@ -179,7 +179,7 @@ void test_vloxseg8ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_tu(
@@ -204,7 +204,7 @@ void test_vloxseg8ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_tu(
@@ -229,7 +229,7 @@ void test_vloxseg8ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_tu(
@@ -254,7 +254,7 @@ void test_vloxseg8ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_tu(
@@ -279,7 +279,7 @@ void test_vloxseg8ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_tu(
@@ -304,7 +304,7 @@ void test_vloxseg8ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_tu(
@@ -329,7 +329,7 @@ void test_vloxseg8ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_tu(
@@ -354,7 +354,7 @@ void test_vloxseg8ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_tu(
@@ -379,7 +379,7 @@ void test_vloxseg8ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_tu(
@@ -404,7 +404,7 @@ void test_vloxseg8ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_tu(
@@ -429,7 +429,7 @@ void test_vloxseg8ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_tu(
@@ -454,7 +454,7 @@ void test_vloxseg8ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_tu(
@@ -479,7 +479,7 @@ void test_vloxseg8ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_tu(
@@ -504,7 +504,7 @@ void test_vloxseg8ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_tu(
@@ -529,7 +529,7 @@ void test_vloxseg8ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_tu(
@@ -554,7 +554,7 @@ void test_vloxseg8ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_tu(
@@ -579,7 +579,7 @@ void test_vloxseg8ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_tu(
@@ -604,7 +604,7 @@ void test_vloxseg8ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_tu(
@@ -629,7 +629,7 @@ void test_vloxseg8ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_tu(
@@ -654,7 +654,7 @@ void test_vloxseg8ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_tum(
@@ -679,7 +679,7 @@ void test_vloxseg8ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_tum(
@@ -704,7 +704,7 @@ void test_vloxseg8ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_tum(
@@ -729,7 +729,7 @@ void test_vloxseg8ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_tum(
@@ -754,7 +754,7 @@ void test_vloxseg8ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_tum(
@@ -779,7 +779,7 @@ void test_vloxseg8ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_tum(
@@ -804,7 +804,7 @@ void test_vloxseg8ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_tum(
@@ -829,7 +829,7 @@ void test_vloxseg8ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_tum(
@@ -854,7 +854,7 @@ void test_vloxseg8ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_tum(
@@ -879,7 +879,7 @@ void test_vloxseg8ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_tum(
@@ -904,7 +904,7 @@ void test_vloxseg8ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_tum(
@@ -929,7 +929,7 @@ void test_vloxseg8ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_tum(
@@ -954,7 +954,7 @@ void test_vloxseg8ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_tum(
@@ -979,7 +979,7 @@ void test_vloxseg8ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_tum(
@@ -1004,7 +1004,7 @@ void test_vloxseg8ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_tum(
@@ -1029,7 +1029,7 @@ void test_vloxseg8ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg8ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_tum(
@@ -1079,7 +1079,7 @@ void test_vloxseg8ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_tum(
@@ -1104,7 +1104,7 @@ void test_vloxseg8ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_tum(
@@ -1129,7 +1129,7 @@ void test_vloxseg8ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_tum(
@@ -1154,7 +1154,7 @@ void test_vloxseg8ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_tum(
@@ -1179,7 +1179,7 @@ void test_vloxseg8ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_tum(
@@ -1204,7 +1204,7 @@ void test_vloxseg8ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_tum(
@@ -1229,7 +1229,7 @@ void test_vloxseg8ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_tum(
@@ -1254,7 +1254,7 @@ void test_vloxseg8ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_tum(
@@ -1279,7 +1279,7 @@ void test_vloxseg8ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_tum(
@@ -1304,7 +1304,7 @@ void test_vloxseg8ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_tumu(
@@ -1329,7 +1329,7 @@ void test_vloxseg8ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vloxseg8ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_tumu(
@@ -1379,7 +1379,7 @@ void test_vloxseg8ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_tumu(
@@ -1404,7 +1404,7 @@ void test_vloxseg8ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg8ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_tumu(
@@ -1454,7 +1454,7 @@ void test_vloxseg8ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_tumu(
@@ -1479,7 +1479,7 @@ void test_vloxseg8ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_tumu(
@@ -1504,7 +1504,7 @@ void test_vloxseg8ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_tumu(
@@ -1529,7 +1529,7 @@ void test_vloxseg8ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_tumu(
@@ -1554,7 +1554,7 @@ void test_vloxseg8ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg8ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_tumu(
@@ -1604,7 +1604,7 @@ void test_vloxseg8ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vloxseg8ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_tumu(
@@ -1654,7 +1654,7 @@ void test_vloxseg8ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_tumu(
@@ -1679,7 +1679,7 @@ void test_vloxseg8ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_tumu(
@@ -1704,7 +1704,7 @@ void test_vloxseg8ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_tumu(
@@ -1729,7 +1729,7 @@ void test_vloxseg8ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_tumu(
@@ -1754,7 +1754,7 @@ void test_vloxseg8ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_tumu(
@@ -1779,7 +1779,7 @@ void test_vloxseg8ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_tumu(
@@ -1804,7 +1804,7 @@ void test_vloxseg8ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_tumu(
@@ -1829,7 +1829,7 @@ void test_vloxseg8ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_tumu(
@@ -1854,7 +1854,7 @@ void test_vloxseg8ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_tumu(
@@ -1879,7 +1879,7 @@ void test_vloxseg8ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_tumu(
@@ -1904,7 +1904,7 @@ void test_vloxseg8ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_tumu(
@@ -1929,7 +1929,7 @@ void test_vloxseg8ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_tumu(
@@ -1954,7 +1954,7 @@ void test_vloxseg8ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_mu(
@@ -1979,7 +1979,7 @@ void test_vloxseg8ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_mu(
@@ -2004,7 +2004,7 @@ void test_vloxseg8ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_mu(
@@ -2029,7 +2029,7 @@ void test_vloxseg8ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_mu(
@@ -2054,7 +2054,7 @@ void test_vloxseg8ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_mu(
@@ -2079,7 +2079,7 @@ void test_vloxseg8ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg8ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_mu(
@@ -2129,7 +2129,7 @@ void test_vloxseg8ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_mu(
@@ -2154,7 +2154,7 @@ void test_vloxseg8ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_mu(
@@ -2179,7 +2179,7 @@ void test_vloxseg8ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_mu(
@@ -2204,7 +2204,7 @@ void test_vloxseg8ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_mu(
@@ -2229,7 +2229,7 @@ void test_vloxseg8ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_mu(
@@ -2254,7 +2254,7 @@ void test_vloxseg8ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vloxseg8ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vloxseg8ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_mu(
@@ -2329,7 +2329,7 @@ void test_vloxseg8ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_mu(
@@ -2354,7 +2354,7 @@ void test_vloxseg8ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_mu(
@@ -2379,7 +2379,7 @@ void test_vloxseg8ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_mu(
@@ -2404,7 +2404,7 @@ void test_vloxseg8ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_mu(
@@ -2429,7 +2429,7 @@ void test_vloxseg8ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_mu(
@@ -2454,7 +2454,7 @@ void test_vloxseg8ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_mu(
@@ -2479,7 +2479,7 @@ void test_vloxseg8ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_mu(
@@ -2504,7 +2504,7 @@ void test_vloxseg8ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_mu(
@@ -2529,7 +2529,7 @@ void test_vloxseg8ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_mu(
@@ -2554,7 +2554,7 @@ void test_vloxseg8ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_mu(
@@ -2579,7 +2579,7 @@ void test_vloxseg8ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_mu(
@@ -2604,6 +2604,6 @@ void test_vloxseg8ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
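(The hunks above repeat the same mechanical rename across every overloaded vloxseg8ei32 test. As a minimal sketch of the user-facing effect, not part of this patch, using the overloaded vadd intrinsic that this same commit also renames, and assuming a toolchain such as clang with -march=rv64gcv:

    #include <riscv_vector.h>

    vint32m1_t add_i32(vint32m1_t a, vint32m1_t b, size_t vl) {
      /* Before this patch-set the overloaded call was spelled vadd(a, b, vl);
         after it, the overloaded name carries the __riscv_ prefix. */
      return __riscv_vadd(a, b, vl);
    }

The non-prefixed spellings are no longer provided, so user code calling the overloaded intrinsics must adopt the prefixed names.)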
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c
index 50030181abac..6f49ea97b0ac 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vloxseg8ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vloxseg8ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_tu(
@@ -104,7 +104,7 @@ void test_vloxseg8ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_tu(
@@ -129,7 +129,7 @@ void test_vloxseg8ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_tu(
@@ -154,7 +154,7 @@ void test_vloxseg8ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_tu(
@@ -179,7 +179,7 @@ void test_vloxseg8ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_tu(
@@ -204,7 +204,7 @@ void test_vloxseg8ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_tu(
@@ -229,7 +229,7 @@ void test_vloxseg8ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_tu(
@@ -254,7 +254,7 @@ void test_vloxseg8ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_tu(
@@ -279,7 +279,7 @@ void test_vloxseg8ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_tu(
@@ -304,7 +304,7 @@ void test_vloxseg8ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_tu(
@@ -329,7 +329,7 @@ void test_vloxseg8ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_tu(
@@ -354,7 +354,7 @@ void test_vloxseg8ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_tu(
@@ -379,7 +379,7 @@ void test_vloxseg8ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_tu(
@@ -404,7 +404,7 @@ void test_vloxseg8ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_tu(
@@ -429,7 +429,7 @@ void test_vloxseg8ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_tu(
@@ -454,7 +454,7 @@ void test_vloxseg8ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_tu(
@@ -479,7 +479,7 @@ void test_vloxseg8ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_tu(
@@ -504,7 +504,7 @@ void test_vloxseg8ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_tu(
@@ -529,7 +529,7 @@ void test_vloxseg8ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_tu(
@@ -554,7 +554,7 @@ void test_vloxseg8ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_tu(
@@ -579,7 +579,7 @@ void test_vloxseg8ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_tu(
@@ -604,7 +604,7 @@ void test_vloxseg8ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_tu(
@@ -629,7 +629,7 @@ void test_vloxseg8ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_tu(
@@ -654,7 +654,7 @@ void test_vloxseg8ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_tum(
@@ -679,7 +679,7 @@ void test_vloxseg8ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_tum(
@@ -704,7 +704,7 @@ void test_vloxseg8ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_tum(
@@ -729,7 +729,7 @@ void test_vloxseg8ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_tum(
@@ -754,7 +754,7 @@ void test_vloxseg8ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_tum(
@@ -779,7 +779,7 @@ void test_vloxseg8ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_tum(
@@ -804,7 +804,7 @@ void test_vloxseg8ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_tum(
@@ -829,7 +829,7 @@ void test_vloxseg8ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_tum(
@@ -854,7 +854,7 @@ void test_vloxseg8ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_tum(
@@ -879,7 +879,7 @@ void test_vloxseg8ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_tum(
@@ -904,7 +904,7 @@ void test_vloxseg8ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_tum(
@@ -929,7 +929,7 @@ void test_vloxseg8ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_tum(
@@ -954,7 +954,7 @@ void test_vloxseg8ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_tum(
@@ -979,7 +979,7 @@ void test_vloxseg8ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_tum(
@@ -1004,7 +1004,7 @@ void test_vloxseg8ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_tum(
@@ -1029,7 +1029,7 @@ void test_vloxseg8ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg8ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_tum(
@@ -1079,7 +1079,7 @@ void test_vloxseg8ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_tum(
@@ -1104,7 +1104,7 @@ void test_vloxseg8ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_tum(
@@ -1129,7 +1129,7 @@ void test_vloxseg8ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_tum(
@@ -1154,7 +1154,7 @@ void test_vloxseg8ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_tum(
@@ -1179,7 +1179,7 @@ void test_vloxseg8ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_tum(
@@ -1204,7 +1204,7 @@ void test_vloxseg8ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_tum(
@@ -1229,7 +1229,7 @@ void test_vloxseg8ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_tum(
@@ -1254,7 +1254,7 @@ void test_vloxseg8ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_tum(
@@ -1279,7 +1279,7 @@ void test_vloxseg8ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_tum(
@@ -1304,7 +1304,7 @@ void test_vloxseg8ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_tumu(
@@ -1329,7 +1329,7 @@ void test_vloxseg8ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vloxseg8ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_tumu(
@@ -1379,7 +1379,7 @@ void test_vloxseg8ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_tumu(
@@ -1404,7 +1404,7 @@ void test_vloxseg8ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg8ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_tumu(
@@ -1454,7 +1454,7 @@ void test_vloxseg8ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_tumu(
@@ -1479,7 +1479,7 @@ void test_vloxseg8ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_tumu(
@@ -1504,7 +1504,7 @@ void test_vloxseg8ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_tumu(
@@ -1529,7 +1529,7 @@ void test_vloxseg8ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_tumu(
@@ -1554,7 +1554,7 @@ void test_vloxseg8ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg8ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_tumu(
@@ -1604,7 +1604,7 @@ void test_vloxseg8ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vloxseg8ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_tumu(
@@ -1654,7 +1654,7 @@ void test_vloxseg8ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_tumu(
@@ -1679,7 +1679,7 @@ void test_vloxseg8ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_tumu(
@@ -1704,7 +1704,7 @@ void test_vloxseg8ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_tumu(
@@ -1729,7 +1729,7 @@ void test_vloxseg8ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_tumu(
@@ -1754,7 +1754,7 @@ void test_vloxseg8ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_tumu(
@@ -1779,7 +1779,7 @@ void test_vloxseg8ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_tumu(
@@ -1804,7 +1804,7 @@ void test_vloxseg8ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_tumu(
@@ -1829,7 +1829,7 @@ void test_vloxseg8ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_tumu(
@@ -1854,7 +1854,7 @@ void test_vloxseg8ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_tumu(
@@ -1879,7 +1879,7 @@ void test_vloxseg8ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_tumu(
@@ -1904,7 +1904,7 @@ void test_vloxseg8ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_tumu(
@@ -1929,7 +1929,7 @@ void test_vloxseg8ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_tumu(
@@ -1954,7 +1954,7 @@ void test_vloxseg8ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_mu(
@@ -1979,7 +1979,7 @@ void test_vloxseg8ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_mu(
@@ -2004,7 +2004,7 @@ void test_vloxseg8ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_mu(
@@ -2029,7 +2029,7 @@ void test_vloxseg8ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_mu(
@@ -2054,7 +2054,7 @@ void test_vloxseg8ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_mu(
@@ -2079,7 +2079,7 @@ void test_vloxseg8ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg8ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_mu(
@@ -2129,7 +2129,7 @@ void test_vloxseg8ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_mu(
@@ -2154,7 +2154,7 @@ void test_vloxseg8ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_mu(
@@ -2179,7 +2179,7 @@ void test_vloxseg8ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_mu(
@@ -2204,7 +2204,7 @@ void test_vloxseg8ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_mu(
@@ -2229,7 +2229,7 @@ void test_vloxseg8ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_mu(
@@ -2254,7 +2254,7 @@ void test_vloxseg8ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vloxseg8ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vloxseg8ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_mu(
@@ -2329,7 +2329,7 @@ void test_vloxseg8ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_mu(
@@ -2354,7 +2354,7 @@ void test_vloxseg8ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_mu(
@@ -2379,7 +2379,7 @@ void test_vloxseg8ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_mu(
@@ -2404,7 +2404,7 @@ void test_vloxseg8ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_mu(
@@ -2429,7 +2429,7 @@ void test_vloxseg8ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_mu(
@@ -2454,7 +2454,7 @@ void test_vloxseg8ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_mu(
@@ -2479,7 +2479,7 @@ void test_vloxseg8ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_mu(
@@ -2504,7 +2504,7 @@ void test_vloxseg8ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_mu(
@@ -2529,7 +2529,7 @@ void test_vloxseg8ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_mu(
@@ -2554,7 +2554,7 @@ void test_vloxseg8ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_mu(
@@ -2579,7 +2579,7 @@ void test_vloxseg8ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_mu(
@@ -2604,6 +2604,6 @@ void test_vloxseg8ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
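// ---------------------------------------------------------------------------
// A minimal sketch (not part of the autogenerated tests) of the user-facing
// rename these hunks perform: each overloaded call keeps its argument list
// and only gains the __riscv_ prefix, with overload resolution unchanged.
// Assumes <riscv_vector.h>; the wrapper name load_i8mf8_seg8_tu is
// hypothetical, but the intrinsic signature mirrors the
// test_vloxseg8ei8_v_i8mf8_tu test in the file below.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void load_i8mf8_seg8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
                        vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5,
                        vint8mf8_t *v6, vint8mf8_t *v7,
                        vint8mf8_t m0, vint8mf8_t m1, vint8mf8_t m2,
                        vint8mf8_t m3, vint8mf8_t m4, vint8mf8_t m5,
                        vint8mf8_t m6, vint8mf8_t m7,
                        const int8_t *base, vuint8mf8_t bindex, size_t vl) {
  // Pre-patch spelling:  vloxseg8ei8_tu(v0, ..., base, bindex, vl);
  // Post-patch spelling: the same overload under the __riscv_ prefix.
  __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7,
                         m0, m1, m2, m3, m4, m5, m6, m7, base, bindex, vl);
}
// ---------------------------------------------------------------------------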
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c
index f46894d298b9..afa63146be64 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vloxseg8ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vloxseg8ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_tu(
@@ -104,7 +104,7 @@ void test_vloxseg8ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_tu(
@@ -129,7 +129,7 @@ void test_vloxseg8ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_tu(
@@ -154,7 +154,7 @@ void test_vloxseg8ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_tu(
@@ -179,7 +179,7 @@ void test_vloxseg8ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_tu(
@@ -204,7 +204,7 @@ void test_vloxseg8ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_tu(
@@ -229,7 +229,7 @@ void test_vloxseg8ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_tu(
@@ -254,7 +254,7 @@ void test_vloxseg8ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_tu(
@@ -279,7 +279,7 @@ void test_vloxseg8ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_tu(
@@ -304,7 +304,7 @@ void test_vloxseg8ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_tu(
@@ -329,7 +329,7 @@ void test_vloxseg8ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_tu(
@@ -354,7 +354,7 @@ void test_vloxseg8ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_tu(
@@ -379,7 +379,7 @@ void test_vloxseg8ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_tu(
@@ -404,7 +404,7 @@ void test_vloxseg8ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_tu(
@@ -429,7 +429,7 @@ void test_vloxseg8ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_tu(
@@ -454,7 +454,7 @@ void test_vloxseg8ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_tu(
@@ -479,7 +479,7 @@ void test_vloxseg8ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_tu(
@@ -504,7 +504,7 @@ void test_vloxseg8ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_tu(
@@ -529,7 +529,7 @@ void test_vloxseg8ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_tu(
@@ -554,7 +554,7 @@ void test_vloxseg8ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_tu(
@@ -579,7 +579,7 @@ void test_vloxseg8ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_tu(
@@ -604,7 +604,7 @@ void test_vloxseg8ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_tu(
@@ -629,7 +629,7 @@ void test_vloxseg8ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_tu(
@@ -654,7 +654,7 @@ void test_vloxseg8ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_tum(
@@ -679,7 +679,7 @@ void test_vloxseg8ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_tum(
@@ -704,7 +704,7 @@ void test_vloxseg8ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_tum(
@@ -729,7 +729,7 @@ void test_vloxseg8ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_tum(
@@ -754,7 +754,7 @@ void test_vloxseg8ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_tum(
@@ -779,7 +779,7 @@ void test_vloxseg8ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_tum(
@@ -804,7 +804,7 @@ void test_vloxseg8ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_tum(
@@ -829,7 +829,7 @@ void test_vloxseg8ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_tum(
@@ -854,7 +854,7 @@ void test_vloxseg8ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_tum(
@@ -879,7 +879,7 @@ void test_vloxseg8ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_tum(
@@ -904,7 +904,7 @@ void test_vloxseg8ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_tum(
@@ -929,7 +929,7 @@ void test_vloxseg8ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_tum(
@@ -954,7 +954,7 @@ void test_vloxseg8ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_tum(
@@ -979,7 +979,7 @@ void test_vloxseg8ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_tum(
@@ -1004,7 +1004,7 @@ void test_vloxseg8ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_tum(
@@ -1029,7 +1029,7 @@ void test_vloxseg8ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_tum(
@@ -1054,7 +1054,7 @@ void test_vloxseg8ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_tum(
@@ -1079,7 +1079,7 @@ void test_vloxseg8ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_tum(
@@ -1104,7 +1104,7 @@ void test_vloxseg8ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_tum(
@@ -1129,7 +1129,7 @@ void test_vloxseg8ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_tum(
@@ -1154,7 +1154,7 @@ void test_vloxseg8ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_tum(
@@ -1179,7 +1179,7 @@ void test_vloxseg8ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_tum(
@@ -1204,7 +1204,7 @@ void test_vloxseg8ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_tum(
@@ -1229,7 +1229,7 @@ void test_vloxseg8ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_tum(
@@ -1254,7 +1254,7 @@ void test_vloxseg8ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_tum(
@@ -1279,7 +1279,7 @@ void test_vloxseg8ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_tum(
@@ -1304,7 +1304,7 @@ void test_vloxseg8ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_tumu(
@@ -1329,7 +1329,7 @@ void test_vloxseg8ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vloxseg8ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_tumu(
@@ -1379,7 +1379,7 @@ void test_vloxseg8ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_tumu(
@@ -1404,7 +1404,7 @@ void test_vloxseg8ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vloxseg8ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_tumu(
@@ -1454,7 +1454,7 @@ void test_vloxseg8ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_tumu(
@@ -1479,7 +1479,7 @@ void test_vloxseg8ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_tumu(
@@ -1504,7 +1504,7 @@ void test_vloxseg8ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_tumu(
@@ -1529,7 +1529,7 @@ void test_vloxseg8ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_tumu(
@@ -1554,7 +1554,7 @@ void test_vloxseg8ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_tumu(
@@ -1579,7 +1579,7 @@ void test_vloxseg8ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_tumu(
@@ -1604,7 +1604,7 @@ void test_vloxseg8ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vloxseg8ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_tumu(
@@ -1654,7 +1654,7 @@ void test_vloxseg8ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_tumu(
@@ -1679,7 +1679,7 @@ void test_vloxseg8ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_tumu(
@@ -1704,7 +1704,7 @@ void test_vloxseg8ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_tumu(
@@ -1729,7 +1729,7 @@ void test_vloxseg8ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_tumu(
@@ -1754,7 +1754,7 @@ void test_vloxseg8ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_tumu(
@@ -1779,7 +1779,7 @@ void test_vloxseg8ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_tumu(
@@ -1804,7 +1804,7 @@ void test_vloxseg8ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_tumu(
@@ -1829,7 +1829,7 @@ void test_vloxseg8ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_tumu(
@@ -1854,7 +1854,7 @@ void test_vloxseg8ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_tumu(
@@ -1879,7 +1879,7 @@ void test_vloxseg8ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_tumu(
@@ -1904,7 +1904,7 @@ void test_vloxseg8ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_tumu(
@@ -1929,7 +1929,7 @@ void test_vloxseg8ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_tumu(
@@ -1954,7 +1954,7 @@ void test_vloxseg8ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_mu(
@@ -1979,7 +1979,7 @@ void test_vloxseg8ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_mu(
@@ -2004,7 +2004,7 @@ void test_vloxseg8ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_mu(
@@ -2029,7 +2029,7 @@ void test_vloxseg8ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_mu(
@@ -2054,7 +2054,7 @@ void test_vloxseg8ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_mu(
@@ -2079,7 +2079,7 @@ void test_vloxseg8ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_mu(
@@ -2104,7 +2104,7 @@ void test_vloxseg8ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_mu(
@@ -2129,7 +2129,7 @@ void test_vloxseg8ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_mu(
@@ -2154,7 +2154,7 @@ void test_vloxseg8ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_mu(
@@ -2179,7 +2179,7 @@ void test_vloxseg8ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_mu(
@@ -2204,7 +2204,7 @@ void test_vloxseg8ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_mu(
@@ -2229,7 +2229,7 @@ void test_vloxseg8ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_mu(
@@ -2254,7 +2254,7 @@ void test_vloxseg8ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vloxseg8ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vloxseg8ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_mu(
@@ -2329,7 +2329,7 @@ void test_vloxseg8ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_mu(
@@ -2354,7 +2354,7 @@ void test_vloxseg8ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_mu(
@@ -2379,7 +2379,7 @@ void test_vloxseg8ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_mu(
@@ -2404,7 +2404,7 @@ void test_vloxseg8ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_mu(
@@ -2429,7 +2429,7 @@ void test_vloxseg8ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_mu(
@@ -2454,7 +2454,7 @@ void test_vloxseg8ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_mu(
@@ -2479,7 +2479,7 @@ void test_vloxseg8ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_mu(
@@ -2504,7 +2504,7 @@ void test_vloxseg8ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_mu(
@@ -2529,7 +2529,7 @@ void test_vloxseg8ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_mu(
@@ -2554,7 +2554,7 @@ void test_vloxseg8ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_mu(
@@ -2579,7 +2579,7 @@ void test_vloxseg8ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_mu(
@@ -2604,6 +2604,6 @@ void test_vloxseg8ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg8ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
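// --- Illustrative usage sketch (not part of the diff above) ---
// A minimal caller of the renamed overloaded segment load, assuming a
// toolchain whose <riscv_vector.h> exposes the __riscv_-prefixed
// overloads exercised by the tests above (compile with e.g.
// -march=rv64gcv). The helper name gather8_i32_mu is hypothetical; the
// intrinsic signature is copied from test_vloxseg8ei8_v_i32m1_mu.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Masked, indexed load of 8-field int32_t segments. Inactive lanes keep
// the corresponding maskedoff values (the _mu policy).
void gather8_i32_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
                    vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5,
                    vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask,
                    vint32m1_t maskedoff0, vint32m1_t maskedoff1,
                    vint32m1_t maskedoff2, vint32m1_t maskedoff3,
                    vint32m1_t maskedoff4, vint32m1_t maskedoff5,
                    vint32m1_t maskedoff6, vint32m1_t maskedoff7,
                    const int32_t *base, vuint8mf4_t bindex, size_t vl) {
  __riscv_vloxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask,
                         maskedoff0, maskedoff1, maskedoff2, maskedoff3,
                         maskedoff4, maskedoff5, maskedoff6, maskedoff7,
                         base, bindex, vl);
}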
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse16.c
index 73f96750a0ff..9e96f9f9574c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *b
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *b
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_tu(
@@ -76,7 +76,7 @@ vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_tu(
@@ -85,7 +85,7 @@ vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_tu(
@@ -94,7 +94,7 @@ vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_tu(
@@ -103,7 +103,7 @@ vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_tu(
@@ -112,7 +112,7 @@ vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_tu(
@@ -121,7 +121,7 @@ vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlse16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_tu(
@@ -130,7 +130,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *bas
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_tu(
@@ -139,7 +139,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *bas
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_tu(
@@ -148,7 +148,7 @@ vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_tu(
@@ -157,7 +157,7 @@ vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlse16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_tu(
@@ -166,7 +166,7 @@ vuint16m4_t test_vlse16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_tum(
@@ -175,7 +175,7 @@ vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_tum(
@@ -184,7 +184,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_tum(
@@ -193,7 +193,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_tum(
@@ -202,7 +202,7 @@ vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_tum(
@@ -211,7 +211,7 @@ vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_tum(
@@ -220,7 +220,7 @@ vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_tum(
@@ -229,7 +229,7 @@ vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_tum(
@@ -238,7 +238,7 @@ vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_tum(
@@ -247,7 +247,7 @@ vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_tum(
@@ -256,7 +256,7 @@ vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlse16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_tum(
@@ -265,7 +265,7 @@ vint16m2_t test_vlse16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_tum(
@@ -274,7 +274,7 @@ vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_tum(
@@ -283,7 +283,7 @@ vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlse16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_tum(
@@ -292,7 +292,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_tum(
@@ -301,7 +301,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_tum(
@@ -310,7 +310,7 @@ vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_tum(
@@ -319,7 +319,7 @@ vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_tum(
@@ -328,7 +328,7 @@ vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_tumu(
@@ -337,7 +337,7 @@ vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vlse16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_tumu(
@@ -346,7 +346,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_tumu(
@@ -355,7 +355,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vlse16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_tumu(
@@ -364,7 +364,7 @@ vfloat16m1_t test_vlse16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_tumu(
@@ -373,7 +373,7 @@ vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_tumu(
@@ -382,7 +382,7 @@ vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_tumu(
@@ -391,7 +391,7 @@ vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_tumu(
@@ -400,7 +400,7 @@ vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_tumu(
@@ -409,7 +409,7 @@ vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_tumu(
@@ -418,7 +418,7 @@ vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_tumu(
@@ -427,7 +427,7 @@ vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vlse16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_tumu(
@@ -436,7 +436,7 @@ vint16m4_t test_vlse16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_tumu(
@@ -445,7 +445,7 @@ vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_tumu(
@@ -454,7 +454,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_tumu(
@@ -463,7 +463,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_tumu(
@@ -472,7 +472,7 @@ vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_tumu(
@@ -481,7 +481,7 @@ vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_tumu(
@@ -490,6 +490,6 @@ vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse16_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl);
}
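// --- Illustrative usage sketch (not part of the diff above) ---
// A minimal caller of the renamed overloaded strided load, assuming the
// __riscv_-prefixed overloads from <riscv_vector.h> tested above. The
// helper name load_column_i16 is hypothetical; the intrinsic signature
// matches test_vlse16_v_i16m1_tu.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Loads one column of a row-major int16_t matrix: the stride argument is
// the row pitch in bytes. Tail lanes keep maskedoff (the _tu policy).
vint16m1_t load_column_i16(vint16m1_t maskedoff, const int16_t *col0,
                           ptrdiff_t row_pitch_bytes, size_t vl) {
  return __riscv_vlse16_tu(maskedoff, col0, row_pitch_bytes, vl);
}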
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse32.c
index 918882179378..bbd034caa281 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_tu(
@@ -22,7 +22,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_tu(
@@ -31,7 +31,7 @@ vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, p
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlse32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_tu(
@@ -40,7 +40,7 @@ vfloat32m2_t test_vlse32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, p
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_tu(
@@ -49,7 +49,7 @@ vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, p
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_tu(
@@ -58,7 +58,7 @@ vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, p
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_tu(
@@ -67,7 +67,7 @@ vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_tu(
@@ -76,7 +76,7 @@ vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_tu(
@@ -85,7 +85,7 @@ vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_tu(
@@ -94,7 +94,7 @@ vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_tu(
@@ -103,7 +103,7 @@ vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_tu(
@@ -112,7 +112,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *bas
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_tu(
@@ -121,7 +121,7 @@ vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_tu(
@@ -130,7 +130,7 @@ vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_tu(
@@ -139,7 +139,7 @@ vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_tum(
@@ -148,7 +148,7 @@ vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_tum(
@@ -157,7 +157,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_tum(
@@ -166,7 +166,7 @@ vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_tum(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_tum(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_tum(
@@ -193,7 +193,7 @@ vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlse32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_tum(
@@ -202,7 +202,7 @@ vint32mf2_t test_vlse32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_tum(
@@ -211,7 +211,7 @@ vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_tum(
@@ -220,7 +220,7 @@ vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlse32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_tum(
@@ -229,7 +229,7 @@ vint32m4_t test_vlse32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlse32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_tum(
@@ -238,7 +238,7 @@ vint32m8_t test_vlse32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_tum(
@@ -247,7 +247,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_tum(
@@ -256,7 +256,7 @@ vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_tum(
@@ -265,7 +265,7 @@ vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_tum(
@@ -274,7 +274,7 @@ vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_tumu(
@@ -283,7 +283,7 @@ vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_tumu(
@@ -292,7 +292,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_tumu(
@@ -301,7 +301,7 @@ vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_tumu(
@@ -310,7 +310,7 @@ vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_tumu(
@@ -319,7 +319,7 @@ vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_tumu(
@@ -328,7 +328,7 @@ vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_tumu(
@@ -337,7 +337,7 @@ vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vlse32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_tumu(
@@ -346,7 +346,7 @@ vint32m1_t test_vlse32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_tumu(
@@ -355,7 +355,7 @@ vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_tumu(
@@ -364,7 +364,7 @@ vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_tumu(
@@ -373,7 +373,7 @@ vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_tumu(
@@ -382,7 +382,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_tumu(
@@ -391,7 +391,7 @@ vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_tumu(
@@ -400,7 +400,7 @@ vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_tumu(
@@ -409,6 +409,6 @@ vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse32_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}
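// --- Illustrative usage sketch (not part of the diff above) ---
// The _tumu variant keeps both tail lanes and inactive (masked-off)
// lanes from maskedoff, as exercised by the tests above. Assumes the
// __riscv_-prefixed overloads from <riscv_vector.h>; the helper name is
// hypothetical, and the signature mirrors test_vlse32_v_f32m1_tumu.
#include <riscv_vector.h>
#include <stddef.h>

vfloat32m1_t masked_strided_load_f32(vbool32_t mask, vfloat32m1_t maskedoff,
                                     const float *base, ptrdiff_t bstride,
                                     size_t vl) {
  return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl);
}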
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse64.c
index 1f2cdfee45db..95b273bb6d25 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_tu(
@@ -22,7 +22,7 @@ vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_tu(
@@ -31,7 +31,7 @@ vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_tu(
@@ -40,7 +40,7 @@ vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_tu(
@@ -49,7 +49,7 @@ vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_tu(
@@ -58,7 +58,7 @@ vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_tu(
@@ -67,7 +67,7 @@ vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlse64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_tu(
@@ -76,7 +76,7 @@ vint64m4_t test_vlse64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_tu(
@@ -85,7 +85,7 @@ vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, ptr
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_tu(
@@ -94,7 +94,7 @@ vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_tu(
@@ -103,7 +103,7 @@ vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_tu(
@@ -112,7 +112,7 @@ vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m1_tum(
@@ -121,7 +121,7 @@ vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_tum(
@@ -130,7 +130,7 @@ vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_tum(
@@ -139,7 +139,7 @@ vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_tum(
@@ -148,7 +148,7 @@ vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_tum(
@@ -157,7 +157,7 @@ vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_tum(
@@ -166,7 +166,7 @@ vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_tum(
@@ -175,7 +175,7 @@ vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_tum(
@@ -184,7 +184,7 @@ vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_tum(
@@ -193,7 +193,7 @@ vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_tum(
@@ -202,7 +202,7 @@ vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_tum(
@@ -211,7 +211,7 @@ vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlse64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_tum(
@@ -220,7 +220,7 @@ vuint64m4_t test_vlse64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m1_tumu(
@@ -229,7 +229,7 @@ vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vlse64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_tumu(
@@ -238,7 +238,7 @@ vfloat64m1_t test_vlse64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_tumu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_tumu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_tumu(
@@ -265,7 +265,7 @@ vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_tumu(
@@ -274,7 +274,7 @@ vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_tumu(
@@ -283,7 +283,7 @@ vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_tumu(
@@ -292,7 +292,7 @@ vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_tumu(
@@ -301,7 +301,7 @@ vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_tumu(
@@ -310,7 +310,7 @@ vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_tumu(
@@ -319,7 +319,7 @@ vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_tumu(
@@ -328,6 +328,6 @@ vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vlse64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse64_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl);
}
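The _tu overloads renamed above take no mask; only the tail elements are governed by the policy. A hedged sketch of a post-rename caller, with invented identifiers (the byte stride skips the imaginary lanes of an interleaved complex array):

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical caller: gather the real parts of [re, im, re, im, ...].
// Elements past vl keep the values of `tail` (_tu = tail undisturbed).
vfloat64m1_t load_real_parts(vfloat64m1_t tail, const double *cplx,
                             size_t vl) {
  return __riscv_vlse64_tu(tail, cplx, 2 * sizeof(double), vl);
}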
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse8.c
index c6efdf2d6dab..67a869ba0256 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse8.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, ptrdi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_tu(
@@ -30,7 +30,7 @@ vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, ptrdi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_tu(
@@ -39,7 +39,7 @@ vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, ptrdi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_tu(
@@ -57,7 +57,7 @@ vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_tu(
@@ -66,7 +66,7 @@ vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_tu(
@@ -75,7 +75,7 @@ vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, ptrdiff_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_tu(
@@ -84,7 +84,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, pt
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_tu(
@@ -93,7 +93,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, pt
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_tu(
@@ -102,7 +102,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, pt
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_tu(
@@ -111,7 +111,7 @@ vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, ptrdi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_tu(
@@ -120,7 +120,7 @@ vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, ptrdi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, ptrdi
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlse8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tu(maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tu(maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_tum(
@@ -138,7 +138,7 @@ vuint8m8_t test_vlse8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, ptrdi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_tum(
@@ -147,7 +147,7 @@ vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_tum(
@@ -156,7 +156,7 @@ vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_tum(
@@ -165,7 +165,7 @@ vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_tum(
@@ -174,7 +174,7 @@ vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlse8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_tum(
@@ -183,7 +183,7 @@ vint8m2_t test_vlse8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_tum(
@@ -192,7 +192,7 @@ vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_tum(
@@ -201,7 +201,7 @@ vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_tum(
@@ -210,7 +210,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_tum(
@@ -219,7 +219,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_tum(
@@ -228,7 +228,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_tum(
@@ -237,7 +237,7 @@ vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_tum(
@@ -246,7 +246,7 @@ vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_tum(
@@ -255,7 +255,7 @@ vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlse8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tum(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_tumu(
@@ -264,7 +264,7 @@ vuint8m8_t test_vlse8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_tumu(
@@ -273,7 +273,7 @@ vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_tumu(
@@ -282,7 +282,7 @@ vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_tumu(
@@ -291,7 +291,7 @@ vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_tumu(
@@ -300,7 +300,7 @@ vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_tumu(
@@ -309,7 +309,7 @@ vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_tumu(
@@ -318,7 +318,7 @@ vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_tumu(
@@ -327,7 +327,7 @@ vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_tumu(
@@ -336,7 +336,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_tumu(
@@ -345,7 +345,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_tumu(
@@ -354,7 +354,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_tumu(
@@ -363,7 +363,7 @@ vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_tumu(
@@ -372,7 +372,7 @@ vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_tumu(
@@ -381,6 +381,6 @@ vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlse8_tumu(mask, maskedoff, base, bstride, vl);
+ return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
}
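On the difference between the _tum and _tumu variants above: both keep tail elements from the destination operand, but _tum leaves masked-off lanes mask-agnostic while _tumu keeps them undisturbed as well. That reading of the suffixes follows the rvv-intrinsic-doc policy naming rather than anything stated in this patch. A sketch with invented identifiers:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical caller: masked strided byte gather under the tum policy.
// Tail elements keep `dest`; masked-off lanes are mask-agnostic, so
// their contents must not be relied upon afterwards.
vint8m1_t gather_bytes(vbool8_t mask, vint8m1_t dest, const int8_t *base,
                       ptrdiff_t stride, size_t vl) {
  return __riscv_vlse8_tum(mask, dest, base, stride, vl);
}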
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16.c
index 703a9808cd57..a99bb5ef3f78 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vlseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vlseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vlseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_tu(
@@ -69,7 +69,7 @@ void test_vlseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_tu(
@@ -82,7 +82,7 @@ void test_vlseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_tu(
@@ -95,7 +95,7 @@ void test_vlseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_tu(
@@ -108,7 +108,7 @@ void test_vlseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_tu(
@@ -121,7 +121,7 @@ void test_vlseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_tu(
@@ -134,7 +134,7 @@ void test_vlseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_tu(
@@ -147,7 +147,7 @@ void test_vlseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_tu(
@@ -160,7 +160,7 @@ void test_vlseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_tu(
@@ -173,7 +173,7 @@ void test_vlseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_tu(
@@ -186,7 +186,7 @@ void test_vlseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_tu(
@@ -199,7 +199,7 @@ void test_vlseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_tum(
@@ -212,7 +212,7 @@ void test_vlseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_tum(
@@ -225,7 +225,7 @@ void test_vlseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_tum(
@@ -238,7 +238,7 @@ void test_vlseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_tum(
@@ -251,7 +251,7 @@ void test_vlseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_tum(
@@ -264,7 +264,7 @@ void test_vlseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_tum(
@@ -277,7 +277,7 @@ void test_vlseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_tum(
@@ -290,7 +290,7 @@ void test_vlseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_tum(
@@ -303,7 +303,7 @@ void test_vlseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_tum(
@@ -316,7 +316,7 @@ void test_vlseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_tum(
@@ -329,7 +329,7 @@ void test_vlseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_tum(
@@ -342,7 +342,7 @@ void test_vlseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_tum(
@@ -355,7 +355,7 @@ void test_vlseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_tum(
@@ -368,7 +368,7 @@ void test_vlseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_tum(
@@ -381,7 +381,7 @@ void test_vlseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_tum(
@@ -394,7 +394,7 @@ void test_vlseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_tumu(
@@ -407,7 +407,7 @@ void test_vlseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_tumu(
@@ -420,7 +420,7 @@ void test_vlseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_tumu(
@@ -433,7 +433,7 @@ void test_vlseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_tumu(
@@ -446,7 +446,7 @@ void test_vlseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_tumu(
@@ -459,7 +459,7 @@ void test_vlseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_tumu(
@@ -472,7 +472,7 @@ void test_vlseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_tumu(
@@ -485,7 +485,7 @@ void test_vlseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_tumu(
@@ -498,7 +498,7 @@ void test_vlseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_tumu(
@@ -511,7 +511,7 @@ void test_vlseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_tumu(
@@ -524,7 +524,7 @@ void test_vlseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_tumu(
@@ -537,7 +537,7 @@ void test_vlseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_tumu(
@@ -550,7 +550,7 @@ void test_vlseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_tumu(
@@ -563,7 +563,7 @@ void test_vlseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_tumu(
@@ -576,7 +576,7 @@ void test_vlseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_tumu(
@@ -589,6 +589,6 @@ void test_vlseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
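The segment-load overloads above return their results through output pointers, one per field; vlseg2e16 deinterleaves pairs of 16-bit elements. A minimal sketch of the renamed entry point (function and variable names are invented for illustration):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical caller: split an interleaved [a, b, a, b, ...] int16_t
// stream into two vector registers. Tail elements of *v_a and *v_b keep
// the values of off0/off1 (_tu policy).
void split_pairs(vint16m1_t *v_a, vint16m1_t *v_b, vint16m1_t off0,
                 vint16m1_t off1, const int16_t *base, size_t vl) {
  __riscv_vlseg2e16_tu(v_a, v_b, off0, off1, base, vl);
}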
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c
index c1a88d04ccdd..a916753de0f7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vlseg2e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vlseg2e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vlseg2e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_tu(
@@ -79,7 +79,7 @@ void test_vlseg2e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_tu(
@@ -94,7 +94,7 @@ void test_vlseg2e16ff_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_tu(
@@ -109,7 +109,7 @@ void test_vlseg2e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_tu(
@@ -124,7 +124,7 @@ void test_vlseg2e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_tu(
@@ -139,7 +139,7 @@ void test_vlseg2e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_tu(
@@ -154,7 +154,7 @@ void test_vlseg2e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_tu(
@@ -169,7 +169,7 @@ void test_vlseg2e16ff_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_tu(
@@ -184,7 +184,7 @@ void test_vlseg2e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_tu(
@@ -199,7 +199,7 @@ void test_vlseg2e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_tu(
@@ -214,7 +214,7 @@ void test_vlseg2e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_tu(
@@ -229,7 +229,7 @@ void test_vlseg2e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_tum(
@@ -244,7 +244,7 @@ void test_vlseg2e16ff_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_tum(
@@ -259,7 +259,7 @@ void test_vlseg2e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_tum(
@@ -274,7 +274,7 @@ void test_vlseg2e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_tum(
@@ -289,7 +289,7 @@ void test_vlseg2e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_tum(
@@ -304,7 +304,7 @@ void test_vlseg2e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_tum(
@@ -319,7 +319,7 @@ void test_vlseg2e16ff_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_tum(
@@ -334,7 +334,7 @@ void test_vlseg2e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_tum(
@@ -349,7 +349,7 @@ void test_vlseg2e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_tum(
@@ -364,7 +364,7 @@ void test_vlseg2e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_tum(
@@ -379,7 +379,7 @@ void test_vlseg2e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_tum(
@@ -394,7 +394,7 @@ void test_vlseg2e16ff_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_tum(
@@ -409,7 +409,7 @@ void test_vlseg2e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_tum(
@@ -424,7 +424,7 @@ void test_vlseg2e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_tum(
@@ -439,7 +439,7 @@ void test_vlseg2e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_tum(
@@ -454,7 +454,7 @@ void test_vlseg2e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_tumu(
@@ -469,7 +469,7 @@ void test_vlseg2e16ff_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_tumu(
@@ -484,7 +484,7 @@ void test_vlseg2e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_tumu(
@@ -499,7 +499,7 @@ void test_vlseg2e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_tumu(
@@ -514,7 +514,7 @@ void test_vlseg2e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_tumu(
@@ -529,7 +529,7 @@ void test_vlseg2e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_tumu(
@@ -544,7 +544,7 @@ void test_vlseg2e16ff_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_tumu(
@@ -559,7 +559,7 @@ void test_vlseg2e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_tumu(
@@ -574,7 +574,7 @@ void test_vlseg2e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_tumu(
@@ -589,7 +589,7 @@ void test_vlseg2e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_tumu(
@@ -604,7 +604,7 @@ void test_vlseg2e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_tumu(
@@ -619,7 +619,7 @@ void test_vlseg2e16ff_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_tumu(
@@ -634,7 +634,7 @@ void test_vlseg2e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_tumu(
@@ -649,7 +649,7 @@ void test_vlseg2e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_tumu(
@@ -664,7 +664,7 @@ void test_vlseg2e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_tumu(
@@ -679,6 +679,6 @@ void test_vlseg2e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
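
The `ff` (fault-only-first) hunks above differ from the plain segment loads only in the extra `size_t *new_vl` out-parameter, through which the intrinsic reports how many elements were actually loaded. A hedged sketch under the same assumptions as before (`demo_ff_tu` is a hypothetical caller, not from this patch):

    #include <stddef.h>
    #include <stdint.h>
    #include <riscv_vector.h>

    /* Hypothetical caller (not from this patch): the fault-only-first
       form writes the count of elements actually loaded to *new_vl,
       matching the argument order in the tests above. */
    void demo_ff_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0,
                    vint16m1_t maskedoff1, const int16_t *base, size_t vl) {
      size_t new_vl;
      __riscv_vlseg2e16ff_tu(v0, v1, maskedoff0, maskedoff1, base, &new_vl, vl);
    }
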
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32.c
index 7da368948c70..1d5cc89e4b47 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_tu(
@@ -30,7 +30,7 @@ void test_vlseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_tu(
@@ -43,7 +43,7 @@ void test_vlseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_tu(
@@ -56,7 +56,7 @@ void test_vlseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_tu(
@@ -69,7 +69,7 @@ void test_vlseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_tu(
@@ -82,7 +82,7 @@ void test_vlseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_tu(
@@ -95,7 +95,7 @@ void test_vlseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_tu(
@@ -108,7 +108,7 @@ void test_vlseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tu(
@@ -121,7 +121,7 @@ void test_vlseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_tu(
@@ -134,7 +134,7 @@ void test_vlseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_tu(
@@ -147,7 +147,7 @@ void test_vlseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_tu(
@@ -160,7 +160,7 @@ void test_vlseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_tum(
@@ -173,7 +173,7 @@ void test_vlseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_tum(
@@ -186,7 +186,7 @@ void test_vlseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_tum(
@@ -199,7 +199,7 @@ void test_vlseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_tum(
@@ -212,7 +212,7 @@ void test_vlseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_tum(
@@ -225,7 +225,7 @@ void test_vlseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_tum(
@@ -238,7 +238,7 @@ void test_vlseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_tum(
@@ -251,7 +251,7 @@ void test_vlseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_tum(
@@ -264,7 +264,7 @@ void test_vlseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tum(
@@ -277,7 +277,7 @@ void test_vlseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_tum(
@@ -290,7 +290,7 @@ void test_vlseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_tum(
@@ -303,7 +303,7 @@ void test_vlseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_tum(
@@ -316,7 +316,7 @@ void test_vlseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_tumu(
@@ -329,7 +329,7 @@ void test_vlseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_tumu(
@@ -342,7 +342,7 @@ void test_vlseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_tumu(
@@ -355,7 +355,7 @@ void test_vlseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_tumu(
@@ -368,7 +368,7 @@ void test_vlseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_tumu(
@@ -381,7 +381,7 @@ void test_vlseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_tumu(
@@ -394,7 +394,7 @@ void test_vlseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_tumu(
@@ -407,7 +407,7 @@ void test_vlseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_tumu(
@@ -420,7 +420,7 @@ void test_vlseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tumu(
@@ -433,7 +433,7 @@ void test_vlseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_tumu(
@@ -446,7 +446,7 @@ void test_vlseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_tumu(
@@ -459,7 +459,7 @@ void test_vlseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_tumu(
@@ -472,6 +472,6 @@ void test_vlseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c
index e3b7db6a8b16..d4391d6288bf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_tu(
@@ -34,7 +34,7 @@ void test_vlseg2e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_tu(
@@ -49,7 +49,7 @@ void test_vlseg2e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_tu(
@@ -64,7 +64,7 @@ void test_vlseg2e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_tu(
@@ -79,7 +79,7 @@ void test_vlseg2e32ff_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_tu(
@@ -94,7 +94,7 @@ void test_vlseg2e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_tu(
@@ -109,7 +109,7 @@ void test_vlseg2e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_tu(
@@ -124,7 +124,7 @@ void test_vlseg2e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tu(
@@ -139,7 +139,7 @@ void test_vlseg2e32ff_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_tu(
@@ -154,7 +154,7 @@ void test_vlseg2e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_tu(
@@ -169,7 +169,7 @@ void test_vlseg2e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_tu(
@@ -184,7 +184,7 @@ void test_vlseg2e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_tum(
@@ -199,7 +199,7 @@ void test_vlseg2e32ff_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_tum(
@@ -214,7 +214,7 @@ void test_vlseg2e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_tum(
@@ -229,7 +229,7 @@ void test_vlseg2e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_tum(
@@ -244,7 +244,7 @@ void test_vlseg2e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_tum(
@@ -259,7 +259,7 @@ void test_vlseg2e32ff_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_tum(
@@ -274,7 +274,7 @@ void test_vlseg2e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_tum(
@@ -289,7 +289,7 @@ void test_vlseg2e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_tum(
@@ -304,7 +304,7 @@ void test_vlseg2e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tum(
@@ -319,7 +319,7 @@ void test_vlseg2e32ff_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_tum(
@@ -334,7 +334,7 @@ void test_vlseg2e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_tum(
@@ -349,7 +349,7 @@ void test_vlseg2e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_tum(
@@ -364,7 +364,7 @@ void test_vlseg2e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_tumu(
@@ -379,7 +379,7 @@ void test_vlseg2e32ff_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_tumu(
@@ -394,7 +394,7 @@ void test_vlseg2e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_tumu(
@@ -409,7 +409,7 @@ void test_vlseg2e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_tumu(
@@ -424,7 +424,7 @@ void test_vlseg2e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_tumu(
@@ -439,7 +439,7 @@ void test_vlseg2e32ff_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_tumu(
@@ -454,7 +454,7 @@ void test_vlseg2e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_tumu(
@@ -469,7 +469,7 @@ void test_vlseg2e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_tumu(
@@ -484,7 +484,7 @@ void test_vlseg2e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tumu(
@@ -499,7 +499,7 @@ void test_vlseg2e32ff_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_tumu(
@@ -514,7 +514,7 @@ void test_vlseg2e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_tumu(
@@ -529,7 +529,7 @@ void test_vlseg2e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_tumu(
@@ -544,6 +544,6 @@ void test_vlseg2e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e32ff_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64.c
index fe1cea1a8dfe..880ea4a96023 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_tu(
@@ -30,7 +30,7 @@ void test_vlseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_tu(
@@ -43,7 +43,7 @@ void test_vlseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_tu(
@@ -56,7 +56,7 @@ void test_vlseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_tu(
@@ -69,7 +69,7 @@ void test_vlseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_tu(
@@ -82,7 +82,7 @@ void test_vlseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_tu(
@@ -95,7 +95,7 @@ void test_vlseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_tu(
@@ -108,7 +108,7 @@ void test_vlseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_tu(
@@ -121,7 +121,7 @@ void test_vlseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_tum(
@@ -134,7 +134,7 @@ void test_vlseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_tum(
@@ -147,7 +147,7 @@ void test_vlseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_tum(
@@ -160,7 +160,7 @@ void test_vlseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_tum(
@@ -173,7 +173,7 @@ void test_vlseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_tum(
@@ -186,7 +186,7 @@ void test_vlseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_tum(
@@ -199,7 +199,7 @@ void test_vlseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_tum(
@@ -212,7 +212,7 @@ void test_vlseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_tum(
@@ -225,7 +225,7 @@ void test_vlseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_tum(
@@ -238,7 +238,7 @@ void test_vlseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_tumu(
@@ -251,7 +251,7 @@ void test_vlseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_tumu(
@@ -264,7 +264,7 @@ void test_vlseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_tumu(
@@ -277,7 +277,7 @@ void test_vlseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_tumu(
@@ -290,7 +290,7 @@ void test_vlseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_tumu(
@@ -303,7 +303,7 @@ void test_vlseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_tumu(
@@ -316,7 +316,7 @@ void test_vlseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_tumu(
@@ -329,7 +329,7 @@ void test_vlseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_tumu(
@@ -342,7 +342,7 @@ void test_vlseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_tumu(
@@ -355,6 +355,6 @@ void test_vlseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c
index 77bea8b8fcee..a3139bb7f302 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_tu(
@@ -34,7 +34,7 @@ void test_vlseg2e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_tu(
@@ -49,7 +49,7 @@ void test_vlseg2e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_tu(
@@ -64,7 +64,7 @@ void test_vlseg2e64ff_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_tu(
@@ -79,7 +79,7 @@ void test_vlseg2e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_tu(
@@ -94,7 +94,7 @@ void test_vlseg2e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_tu(
@@ -109,7 +109,7 @@ void test_vlseg2e64ff_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_tu(
@@ -124,7 +124,7 @@ void test_vlseg2e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_tu(
@@ -139,7 +139,7 @@ void test_vlseg2e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_tum(
@@ -154,7 +154,7 @@ void test_vlseg2e64ff_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_tum(
@@ -169,7 +169,7 @@ void test_vlseg2e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_tum(
@@ -184,7 +184,7 @@ void test_vlseg2e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_tum(
@@ -199,7 +199,7 @@ void test_vlseg2e64ff_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_tum(
@@ -214,7 +214,7 @@ void test_vlseg2e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_tum(
@@ -229,7 +229,7 @@ void test_vlseg2e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_tum(
@@ -244,7 +244,7 @@ void test_vlseg2e64ff_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_tum(
@@ -259,7 +259,7 @@ void test_vlseg2e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_tum(
@@ -274,7 +274,7 @@ void test_vlseg2e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_tumu(
@@ -289,7 +289,7 @@ void test_vlseg2e64ff_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_tumu(
@@ -304,7 +304,7 @@ void test_vlseg2e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_tumu(
@@ -319,7 +319,7 @@ void test_vlseg2e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_tumu(
@@ -334,7 +334,7 @@ void test_vlseg2e64ff_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_tumu(
@@ -349,7 +349,7 @@ void test_vlseg2e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_tumu(
@@ -364,7 +364,7 @@ void test_vlseg2e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_tumu(
@@ -379,7 +379,7 @@ void test_vlseg2e64ff_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_tumu(
@@ -394,7 +394,7 @@ void test_vlseg2e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_tumu(
@@ -409,6 +409,6 @@ void test_vlseg2e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e64ff_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8.c
index cb138e284d5a..b4205e51ea2f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_tu(
@@ -29,7 +29,7 @@ void test_vlseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_tu(
@@ -42,7 +42,7 @@ void test_vlseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_tu(
@@ -55,7 +55,7 @@ void test_vlseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_tu(
@@ -68,7 +68,7 @@ void test_vlseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_tu(
@@ -81,7 +81,7 @@ void test_vlseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_tu(
@@ -94,7 +94,7 @@ void test_vlseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_tu(
@@ -107,7 +107,7 @@ void test_vlseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_tu(
@@ -120,7 +120,7 @@ void test_vlseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_tu(
@@ -133,7 +133,7 @@ void test_vlseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_tu(
@@ -146,7 +146,7 @@ void test_vlseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedof
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_tu(
@@ -159,7 +159,7 @@ void test_vlseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedof
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_tum(
@@ -172,7 +172,7 @@ void test_vlseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedof
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_tum(
@@ -185,7 +185,7 @@ void test_vlseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_tum(
@@ -198,7 +198,7 @@ void test_vlseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_tum(
@@ -211,7 +211,7 @@ void test_vlseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_tum(
@@ -224,7 +224,7 @@ void test_vlseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_tum(
@@ -237,7 +237,7 @@ void test_vlseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_tum(
@@ -250,7 +250,7 @@ void test_vlseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_tum(
@@ -263,7 +263,7 @@ void test_vlseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_tum(
@@ -276,7 +276,7 @@ void test_vlseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_tum(
@@ -289,7 +289,7 @@ void test_vlseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_tum(
@@ -302,7 +302,7 @@ void test_vlseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_tum(
@@ -315,7 +315,7 @@ void test_vlseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_tumu(
@@ -328,7 +328,7 @@ void test_vlseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_tumu(
@@ -341,7 +341,7 @@ void test_vlseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_tumu(
@@ -354,7 +354,7 @@ void test_vlseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_tumu(
@@ -367,7 +367,7 @@ void test_vlseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_tumu(
@@ -380,7 +380,7 @@ void test_vlseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_tumu(
@@ -393,7 +393,7 @@ void test_vlseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_tumu(
@@ -406,7 +406,7 @@ void test_vlseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_tumu(
@@ -419,7 +419,7 @@ void test_vlseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_tumu(
@@ -432,7 +432,7 @@ void test_vlseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_tumu(
@@ -445,7 +445,7 @@ void test_vlseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_tumu(
@@ -458,7 +458,7 @@ void test_vlseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_tumu(
@@ -471,6 +471,6 @@ void test_vlseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+ return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c
index 9d2f414ff061..fe35a134ff24 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_tu(
@@ -34,7 +34,7 @@ void test_vlseg2e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_tu(
@@ -49,7 +49,7 @@ void test_vlseg2e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_tu(
@@ -64,7 +64,7 @@ void test_vlseg2e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_tu(
@@ -79,7 +79,7 @@ void test_vlseg2e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_tu(
@@ -94,7 +94,7 @@ void test_vlseg2e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_tu(
@@ -109,7 +109,7 @@ void test_vlseg2e8ff_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_tu(
@@ -124,7 +124,7 @@ void test_vlseg2e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_tu(
@@ -139,7 +139,7 @@ void test_vlseg2e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_tu(
@@ -154,7 +154,7 @@ void test_vlseg2e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_tu(
@@ -169,7 +169,7 @@ void test_vlseg2e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_tu(
@@ -184,7 +184,7 @@ void test_vlseg2e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_tum(
@@ -199,7 +199,7 @@ void test_vlseg2e8ff_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_tum(
@@ -214,7 +214,7 @@ void test_vlseg2e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_tum(
@@ -229,7 +229,7 @@ void test_vlseg2e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_tum(
@@ -244,7 +244,7 @@ void test_vlseg2e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_tum(
@@ -259,7 +259,7 @@ void test_vlseg2e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_tum(
@@ -274,7 +274,7 @@ void test_vlseg2e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_tum(
@@ -289,7 +289,7 @@ void test_vlseg2e8ff_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_tum(
@@ -304,7 +304,7 @@ void test_vlseg2e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_tum(
@@ -319,7 +319,7 @@ void test_vlseg2e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_tum(
@@ -334,7 +334,7 @@ void test_vlseg2e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_tum(
@@ -349,7 +349,7 @@ void test_vlseg2e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_tum(
@@ -364,7 +364,7 @@ void test_vlseg2e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_tumu(
@@ -379,7 +379,7 @@ void test_vlseg2e8ff_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_tumu(
@@ -394,7 +394,7 @@ void test_vlseg2e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_tumu(
@@ -409,7 +409,7 @@ void test_vlseg2e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_tumu(
@@ -424,7 +424,7 @@ void test_vlseg2e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_tumu(
@@ -439,7 +439,7 @@ void test_vlseg2e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_tumu(
@@ -454,7 +454,7 @@ void test_vlseg2e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_tumu(
@@ -469,7 +469,7 @@ void test_vlseg2e8ff_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_tumu(
@@ -484,7 +484,7 @@ void test_vlseg2e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_tumu(
@@ -499,7 +499,7 @@ void test_vlseg2e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_tumu(
@@ -514,7 +514,7 @@ void test_vlseg2e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_tumu(
@@ -529,7 +529,7 @@ void test_vlseg2e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_tumu(
@@ -544,6 +544,6 @@ void test_vlseg2e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+ return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
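For context, the change above is purely a rename at call sites: overload resolution from the argument types is untouched, and the unprefixed spelling stops resolving. A minimal caller sketch, assuming a toolchain with the RVV intrinsics after this patch (the helper name and its use are hypothetical, not taken from the test):

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    // Load two interleaved byte fields with the tail-undisturbed (_tu)
    // fault-only-first segment load, under the new __riscv_ prefix.
    static void load_pair_tu(const uint8_t *base, size_t vl,
                             vuint8m1_t tail0, vuint8m1_t tail1) {
      vuint8m1_t v0, v1;
      size_t new_vl; // fault-only-first writes back the count actually loaded
      __riscv_vlseg2e8ff_tu(&v0, &v1, tail0, tail1, base, &new_vl, vl);
      // Elements at indices >= new_vl keep tail0/tail1 (tail-undisturbed).
      (void)v0; (void)v1; (void)new_vl;
    }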
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16.c
index bac992067133..496248e943e1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vlseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vlseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vlseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_tu(
@@ -79,7 +79,7 @@ void test_vlseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_tu(
@@ -94,7 +94,7 @@ void test_vlseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_tu(
@@ -109,7 +109,7 @@ void test_vlseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_tu(
@@ -124,7 +124,7 @@ void test_vlseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_tu(
@@ -139,7 +139,7 @@ void test_vlseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_tu(
@@ -154,7 +154,7 @@ void test_vlseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_tu(
@@ -169,7 +169,7 @@ void test_vlseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_tu(
@@ -184,7 +184,7 @@ void test_vlseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_tum(
@@ -199,7 +199,7 @@ void test_vlseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_tum(
@@ -214,7 +214,7 @@ void test_vlseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_tum(
@@ -229,7 +229,7 @@ void test_vlseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_tum(
@@ -244,7 +244,7 @@ void test_vlseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_tum(
@@ -259,7 +259,7 @@ void test_vlseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_tum(
@@ -274,7 +274,7 @@ void test_vlseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_tum(
@@ -289,7 +289,7 @@ void test_vlseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_tum(
@@ -304,7 +304,7 @@ void test_vlseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_tum(
@@ -319,7 +319,7 @@ void test_vlseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_tum(
@@ -334,7 +334,7 @@ void test_vlseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_tum(
@@ -349,7 +349,7 @@ void test_vlseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_tum(
@@ -364,7 +364,7 @@ void test_vlseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_tumu(
@@ -379,7 +379,7 @@ void test_vlseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_tumu(
@@ -394,7 +394,7 @@ void test_vlseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_tumu(
@@ -409,7 +409,7 @@ void test_vlseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_tumu(
@@ -424,7 +424,7 @@ void test_vlseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_tumu(
@@ -439,7 +439,7 @@ void test_vlseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_tumu(
@@ -454,7 +454,7 @@ void test_vlseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_tumu(
@@ -469,7 +469,7 @@ void test_vlseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_tumu(
@@ -484,7 +484,7 @@ void test_vlseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_tumu(
@@ -499,7 +499,7 @@ void test_vlseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_tumu(
@@ -514,7 +514,7 @@ void test_vlseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_tumu(
@@ -529,7 +529,7 @@ void test_vlseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_tumu(
@@ -544,6 +544,6 @@ void test_vlseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
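The same rename applies uniformly to the masked policy variants exercised in this file. A hedged sketch of the three-field 16-bit segment load (helper name and data hypothetical): _tum keeps the tail from the maskedoff operands, _tumu additionally keeps inactive (masked-off) elements undisturbed, and the mask and maskedoff argument types still select the overload exactly as before:

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    static void load_triple_masked(const int16_t *base, size_t vl, vbool16_t mask,
                                   vint16m1_t off0, vint16m1_t off1, vint16m1_t off2) {
      vint16m1_t v0, v1, v2;
      // Tail undisturbed, inactive elements mask-agnostic:
      __riscv_vlseg3e16_tum(&v0, &v1, &v2, mask, off0, off1, off2, base, vl);
      // Tail undisturbed, inactive elements undisturbed:
      __riscv_vlseg3e16_tumu(&v0, &v1, &v2, mask, off0, off1, off2, base, vl);
      (void)v0; (void)v1; (void)v2;
    }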
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c
index e32161daaf67..32419f518470 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vlseg3e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vlseg3e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vlseg3e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_tu(
@@ -89,7 +89,7 @@ void test_vlseg3e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_tu(
@@ -106,7 +106,7 @@ void test_vlseg3e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_tu(
@@ -123,7 +123,7 @@ void test_vlseg3e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_tu(
@@ -140,7 +140,7 @@ void test_vlseg3e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_tu(
@@ -157,7 +157,7 @@ void test_vlseg3e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_tu(
@@ -174,7 +174,7 @@ void test_vlseg3e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_tu(
@@ -191,7 +191,7 @@ void test_vlseg3e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_tu(
@@ -208,7 +208,7 @@ void test_vlseg3e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_tum(
@@ -225,7 +225,7 @@ void test_vlseg3e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_tum(
@@ -242,7 +242,7 @@ void test_vlseg3e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_tum(
@@ -259,7 +259,7 @@ void test_vlseg3e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_tum(
@@ -276,7 +276,7 @@ void test_vlseg3e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_tum(
@@ -293,7 +293,7 @@ void test_vlseg3e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_tum(
@@ -310,7 +310,7 @@ void test_vlseg3e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_tum(
@@ -327,7 +327,7 @@ void test_vlseg3e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_tum(
@@ -344,7 +344,7 @@ void test_vlseg3e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_tum(
@@ -361,7 +361,7 @@ void test_vlseg3e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_tum(
@@ -378,7 +378,7 @@ void test_vlseg3e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_tum(
@@ -395,7 +395,7 @@ void test_vlseg3e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_tum(
@@ -412,7 +412,7 @@ void test_vlseg3e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_tumu(
@@ -429,7 +429,7 @@ void test_vlseg3e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_tumu(
@@ -446,7 +446,7 @@ void test_vlseg3e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_tumu(
@@ -463,7 +463,7 @@ void test_vlseg3e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_tumu(
@@ -480,7 +480,7 @@ void test_vlseg3e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_tumu(
@@ -497,7 +497,7 @@ void test_vlseg3e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_tumu(
@@ -514,7 +514,7 @@ void test_vlseg3e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_tumu(
@@ -531,7 +531,7 @@ void test_vlseg3e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_tumu(
@@ -548,7 +548,7 @@ void test_vlseg3e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_tumu(
@@ -565,7 +565,7 @@ void test_vlseg3e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_tumu(
@@ -582,7 +582,7 @@ void test_vlseg3e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_tumu(
@@ -599,7 +599,7 @@ void test_vlseg3e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_tumu(
@@ -616,6 +616,6 @@ void test_vlseg3e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
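For downstream code that must build across the rename, one possible migration aid is a per-intrinsic forwarding macro; this is a hypothetical shim, not provided by the patch or by riscv_vector.h, and the __riscv_v_intrinsic guard is an assumption about the toolchain's feature macro. Because the overloads are resolved from the argument types, a plain variadic macro suffices:

    // Hypothetical compatibility shim for the overloaded spellings.
    #if defined(__riscv_v_intrinsic)
    #define vlseg3e16ff_tu(...)   __riscv_vlseg3e16ff_tu(__VA_ARGS__)
    #define vlseg3e16ff_tum(...)  __riscv_vlseg3e16ff_tum(__VA_ARGS__)
    #define vlseg3e16ff_tumu(...) __riscv_vlseg3e16ff_tumu(__VA_ARGS__)
    #endif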
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32.c
index 958f81808353..be3710040653 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_tu(
@@ -34,7 +34,7 @@ void test_vlseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_tu(
@@ -49,7 +49,7 @@ void test_vlseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_tu(
@@ -64,7 +64,7 @@ void test_vlseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_tu(
@@ -79,7 +79,7 @@ void test_vlseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_tu(
@@ -94,7 +94,7 @@ void test_vlseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_tu(
@@ -109,7 +109,7 @@ void test_vlseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_tu(
@@ -124,7 +124,7 @@ void test_vlseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_tu(
@@ -139,7 +139,7 @@ void test_vlseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_tum(
@@ -154,7 +154,7 @@ void test_vlseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_tum(
@@ -169,7 +169,7 @@ void test_vlseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_tum(
@@ -184,7 +184,7 @@ void test_vlseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_tum(
@@ -199,7 +199,7 @@ void test_vlseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_tum(
@@ -214,7 +214,7 @@ void test_vlseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_tum(
@@ -229,7 +229,7 @@ void test_vlseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_tum(
@@ -244,7 +244,7 @@ void test_vlseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_tum(
@@ -259,7 +259,7 @@ void test_vlseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_tum(
@@ -274,7 +274,7 @@ void test_vlseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_tumu(
@@ -289,7 +289,7 @@ void test_vlseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_tumu(
@@ -304,7 +304,7 @@ void test_vlseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_tumu(
@@ -319,7 +319,7 @@ void test_vlseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_tumu(
@@ -334,7 +334,7 @@ void test_vlseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_tumu(
@@ -349,7 +349,7 @@ void test_vlseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_tumu(
@@ -364,7 +364,7 @@ void test_vlseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_tumu(
@@ -379,7 +379,7 @@ void test_vlseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_tumu(
@@ -394,7 +394,7 @@ void test_vlseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_tumu(
@@ -409,6 +409,6 @@ void test_vlseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c
index 252022525656..81aa4ceb24cb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_tu(
@@ -38,7 +38,7 @@ void test_vlseg3e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_tu(
@@ -55,7 +55,7 @@ void test_vlseg3e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_tu(
@@ -72,7 +72,7 @@ void test_vlseg3e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_tu(
@@ -89,7 +89,7 @@ void test_vlseg3e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_tu(
@@ -106,7 +106,7 @@ void test_vlseg3e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_tu(
@@ -123,7 +123,7 @@ void test_vlseg3e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_tu(
@@ -140,7 +140,7 @@ void test_vlseg3e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_tu(
@@ -157,7 +157,7 @@ void test_vlseg3e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_tum(
@@ -174,7 +174,7 @@ void test_vlseg3e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_tum(
@@ -191,7 +191,7 @@ void test_vlseg3e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_tum(
@@ -208,7 +208,7 @@ void test_vlseg3e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_tum(
@@ -225,7 +225,7 @@ void test_vlseg3e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_tum(
@@ -242,7 +242,7 @@ void test_vlseg3e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_tum(
@@ -259,7 +259,7 @@ void test_vlseg3e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_tum(
@@ -276,7 +276,7 @@ void test_vlseg3e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_tum(
@@ -293,7 +293,7 @@ void test_vlseg3e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_tum(
@@ -310,7 +310,7 @@ void test_vlseg3e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_tumu(
@@ -327,7 +327,7 @@ void test_vlseg3e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_tumu(
@@ -344,7 +344,7 @@ void test_vlseg3e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_tumu(
@@ -361,7 +361,7 @@ void test_vlseg3e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_tumu(
@@ -378,7 +378,7 @@ void test_vlseg3e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_tumu(
@@ -395,7 +395,7 @@ void test_vlseg3e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_tumu(
@@ -412,7 +412,7 @@ void test_vlseg3e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_tumu(
@@ -429,7 +429,7 @@ void test_vlseg3e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_tumu(
@@ -446,7 +446,7 @@ void test_vlseg3e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_tumu(
@@ -463,6 +463,6 @@ void test_vlseg3e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64.c
index 77af769e5c62..fb2037f19ce0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_tu(
@@ -34,7 +34,7 @@ void test_vlseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_tu(
@@ -49,7 +49,7 @@ void test_vlseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) {
- return vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_tu(
@@ -64,7 +64,7 @@ void test_vlseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) {
- return vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_tu(
@@ -79,7 +79,7 @@ void test_vlseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) {
- return vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_tu(
@@ -94,7 +94,7 @@ void test_vlseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) {
- return vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_tum(
@@ -109,7 +109,7 @@ void test_vlseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_tum(
@@ -124,7 +124,7 @@ void test_vlseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_tum(
@@ -139,7 +139,7 @@ void test_vlseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) {
- return vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_tum(
@@ -154,7 +154,7 @@ void test_vlseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) {
- return vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_tum(
@@ -169,7 +169,7 @@ void test_vlseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) {
- return vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_tum(
@@ -184,7 +184,7 @@ void test_vlseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) {
- return vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_tumu(
@@ -199,7 +199,7 @@ void test_vlseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_tumu(
@@ -214,7 +214,7 @@ void test_vlseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_tumu(
@@ -229,7 +229,7 @@ void test_vlseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) {
- return vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_tumu(
@@ -244,7 +244,7 @@ void test_vlseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) {
- return vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_tumu(
@@ -259,7 +259,7 @@ void test_vlseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) {
- return vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_tumu(
@@ -274,6 +274,6 @@ void test_vlseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) {
- return vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c
index 53f86baed156..bc3597f93238 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_tu(
@@ -38,7 +38,7 @@ void test_vlseg3e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_tu(
@@ -55,7 +55,7 @@ void test_vlseg3e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_tu(
@@ -72,7 +72,7 @@ void test_vlseg3e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_tu(
@@ -89,7 +89,7 @@ void test_vlseg3e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_tu(
@@ -106,7 +106,7 @@ void test_vlseg3e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_tum(
@@ -123,7 +123,7 @@ void test_vlseg3e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_tum(
@@ -140,7 +140,7 @@ void test_vlseg3e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_tum(
@@ -157,7 +157,7 @@ void test_vlseg3e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_tum(
@@ -174,7 +174,7 @@ void test_vlseg3e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_tum(
@@ -191,7 +191,7 @@ void test_vlseg3e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_tum(
@@ -208,7 +208,7 @@ void test_vlseg3e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_tumu(
@@ -225,7 +225,7 @@ void test_vlseg3e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_tumu(
@@ -242,7 +242,7 @@ void test_vlseg3e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_tumu(
@@ -259,7 +259,7 @@ void test_vlseg3e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_tumu(
@@ -276,7 +276,7 @@ void test_vlseg3e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_tumu(
@@ -293,7 +293,7 @@ void test_vlseg3e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_tumu(
@@ -310,6 +310,6 @@ void test_vlseg3e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8.c
index a5705a2f4683..c8aff799d365 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8.c
@@ -18,7 +18,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_tu(
@@ -33,7 +33,7 @@ void test_vlseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_tu(
@@ -48,7 +48,7 @@ void test_vlseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_tu(
@@ -63,7 +63,7 @@ void test_vlseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_tu(
@@ -78,7 +78,7 @@ void test_vlseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_tu(
@@ -93,7 +93,7 @@ void test_vlseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_tu(
@@ -108,7 +108,7 @@ void test_vlseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_tu(
@@ -123,7 +123,7 @@ void test_vlseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_tu(
@@ -138,7 +138,7 @@ void test_vlseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_tu(
@@ -153,7 +153,7 @@ void test_vlseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_tum(
@@ -168,7 +168,7 @@ void test_vlseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_tum(
@@ -183,7 +183,7 @@ void test_vlseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_tum(
@@ -198,7 +198,7 @@ void test_vlseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_tum(
@@ -213,7 +213,7 @@ void test_vlseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_tum(
@@ -228,7 +228,7 @@ void test_vlseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_tum(
@@ -243,7 +243,7 @@ void test_vlseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_tum(
@@ -258,7 +258,7 @@ void test_vlseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_tum(
@@ -273,7 +273,7 @@ void test_vlseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_tum(
@@ -288,7 +288,7 @@ void test_vlseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_tum(
@@ -303,7 +303,7 @@ void test_vlseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_tumu(
@@ -318,7 +318,7 @@ void test_vlseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_tumu(
@@ -333,7 +333,7 @@ void test_vlseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_tumu(
@@ -348,7 +348,7 @@ void test_vlseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_tumu(
@@ -363,7 +363,7 @@ void test_vlseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_tumu(
@@ -378,7 +378,7 @@ void test_vlseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_tumu(
@@ -393,7 +393,7 @@ void test_vlseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_tumu(
@@ -408,7 +408,7 @@ void test_vlseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_tumu(
@@ -423,7 +423,7 @@ void test_vlseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_tumu(
@@ -438,7 +438,7 @@ void test_vlseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_tumu(
@@ -453,6 +453,6 @@ void test_vlseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+ return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c
index 48fa875f9ef6..026b59e3542a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_tu(
@@ -38,7 +38,7 @@ void test_vlseg3e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_tu(
@@ -55,7 +55,7 @@ void test_vlseg3e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_tu(
@@ -72,7 +72,7 @@ void test_vlseg3e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_tu(
@@ -89,7 +89,7 @@ void test_vlseg3e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_tu(
@@ -106,7 +106,7 @@ void test_vlseg3e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_tu(
@@ -123,7 +123,7 @@ void test_vlseg3e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_tu(
@@ -140,7 +140,7 @@ void test_vlseg3e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_tu(
@@ -157,7 +157,7 @@ void test_vlseg3e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_tu(
@@ -174,7 +174,7 @@ void test_vlseg3e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_tum(
@@ -191,7 +191,7 @@ void test_vlseg3e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_tum(
@@ -208,7 +208,7 @@ void test_vlseg3e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_tum(
@@ -225,7 +225,7 @@ void test_vlseg3e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_tum(
@@ -242,7 +242,7 @@ void test_vlseg3e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_tum(
@@ -259,7 +259,7 @@ void test_vlseg3e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_tum(
@@ -276,7 +276,7 @@ void test_vlseg3e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_tum(
@@ -293,7 +293,7 @@ void test_vlseg3e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_tum(
@@ -310,7 +310,7 @@ void test_vlseg3e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_tum(
@@ -327,7 +327,7 @@ void test_vlseg3e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_tum(
@@ -344,7 +344,7 @@ void test_vlseg3e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_tumu(
@@ -361,7 +361,7 @@ void test_vlseg3e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_tumu(
@@ -378,7 +378,7 @@ void test_vlseg3e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_tumu(
@@ -395,7 +395,7 @@ void test_vlseg3e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_tumu(
@@ -412,7 +412,7 @@ void test_vlseg3e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_tumu(
@@ -429,7 +429,7 @@ void test_vlseg3e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_tumu(
@@ -446,7 +446,7 @@ void test_vlseg3e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_tumu(
@@ -463,7 +463,7 @@ void test_vlseg3e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_tumu(
@@ -480,7 +480,7 @@ void test_vlseg3e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_tumu(
@@ -497,7 +497,7 @@ void test_vlseg3e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_tumu(
@@ -514,6 +514,6 @@ void test_vlseg3e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+ return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16.c
index 590f83f921a4..831f0f480112 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vlseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vlseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vlseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_tu(
@@ -89,7 +89,7 @@ void test_vlseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_tu(
@@ -106,7 +106,7 @@ void test_vlseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_tu(
@@ -123,7 +123,7 @@ void test_vlseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_tu(
@@ -140,7 +140,7 @@ void test_vlseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_tu(
@@ -157,7 +157,7 @@ void test_vlseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_tu(
@@ -174,7 +174,7 @@ void test_vlseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_tu(
@@ -191,7 +191,7 @@ void test_vlseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_tu(
@@ -208,7 +208,7 @@ void test_vlseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_tum(
@@ -225,7 +225,7 @@ void test_vlseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_tum(
@@ -242,7 +242,7 @@ void test_vlseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_tum(
@@ -259,7 +259,7 @@ void test_vlseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_tum(
@@ -276,7 +276,7 @@ void test_vlseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_tum(
@@ -293,7 +293,7 @@ void test_vlseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_tum(
@@ -310,7 +310,7 @@ void test_vlseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_tum(
@@ -327,7 +327,7 @@ void test_vlseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_tum(
@@ -344,7 +344,7 @@ void test_vlseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_tum(
@@ -361,7 +361,7 @@ void test_vlseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_tum(
@@ -378,7 +378,7 @@ void test_vlseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_tum(
@@ -395,7 +395,7 @@ void test_vlseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_tum(
@@ -412,7 +412,7 @@ void test_vlseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_tumu(
@@ -429,7 +429,7 @@ void test_vlseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_tumu(
@@ -446,7 +446,7 @@ void test_vlseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_tumu(
@@ -463,7 +463,7 @@ void test_vlseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_tumu(
@@ -480,7 +480,7 @@ void test_vlseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_tumu(
@@ -497,7 +497,7 @@ void test_vlseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_tumu(
@@ -514,7 +514,7 @@ void test_vlseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_tumu(
@@ -531,7 +531,7 @@ void test_vlseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_tumu(
@@ -548,7 +548,7 @@ void test_vlseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_tumu(
@@ -565,7 +565,7 @@ void test_vlseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_tumu(
@@ -582,7 +582,7 @@ void test_vlseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_tumu(
@@ -599,7 +599,7 @@ void test_vlseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_tumu(
@@ -616,6 +616,6 @@ void test_vlseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c
index 1c6ca01bd830..3a06b54800d7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vlseg4e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vlseg4e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_tu(
@@ -80,7 +80,7 @@ void test_vlseg4e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_tu(
@@ -99,7 +99,7 @@ void test_vlseg4e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_tu(
@@ -118,7 +118,7 @@ void test_vlseg4e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_tu(
@@ -137,7 +137,7 @@ void test_vlseg4e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_tu(
@@ -156,7 +156,7 @@ void test_vlseg4e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_tu(
@@ -175,7 +175,7 @@ void test_vlseg4e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_tu(
@@ -194,7 +194,7 @@ void test_vlseg4e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_tu(
@@ -213,7 +213,7 @@ void test_vlseg4e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_tu(
@@ -232,7 +232,7 @@ void test_vlseg4e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_tum(
@@ -251,7 +251,7 @@ void test_vlseg4e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_tum(
@@ -270,7 +270,7 @@ void test_vlseg4e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_tum(
@@ -289,7 +289,7 @@ void test_vlseg4e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_tum(
@@ -308,7 +308,7 @@ void test_vlseg4e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_tum(
@@ -327,7 +327,7 @@ void test_vlseg4e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_tum(
@@ -346,7 +346,7 @@ void test_vlseg4e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_tum(
@@ -365,7 +365,7 @@ void test_vlseg4e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_tum(
@@ -384,7 +384,7 @@ void test_vlseg4e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_tum(
@@ -403,7 +403,7 @@ void test_vlseg4e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_tum(
@@ -422,7 +422,7 @@ void test_vlseg4e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_tum(
@@ -441,7 +441,7 @@ void test_vlseg4e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_tum(
@@ -460,7 +460,7 @@ void test_vlseg4e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_tumu(
@@ -479,7 +479,7 @@ void test_vlseg4e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_tumu(
@@ -498,7 +498,7 @@ void test_vlseg4e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_tumu(
@@ -517,7 +517,7 @@ void test_vlseg4e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_tumu(
@@ -536,7 +536,7 @@ void test_vlseg4e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_tumu(
@@ -555,7 +555,7 @@ void test_vlseg4e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_tumu(
@@ -574,7 +574,7 @@ void test_vlseg4e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_tumu(
@@ -593,7 +593,7 @@ void test_vlseg4e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_tumu(
@@ -612,7 +612,7 @@ void test_vlseg4e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_tumu(
@@ -631,7 +631,7 @@ void test_vlseg4e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_tumu(
@@ -650,7 +650,7 @@ void test_vlseg4e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_tumu(
@@ -669,7 +669,7 @@ void test_vlseg4e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_tumu(
@@ -688,6 +688,6 @@ void test_vlseg4e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32.c
index 7ef5eae17cd4..9de2869cfd01 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_tu(
@@ -38,7 +38,7 @@ void test_vlseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_tu(
@@ -55,7 +55,7 @@ void test_vlseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_tu(
@@ -72,7 +72,7 @@ void test_vlseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_tu(
@@ -89,7 +89,7 @@ void test_vlseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_tu(
@@ -106,7 +106,7 @@ void test_vlseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_tu(
@@ -123,7 +123,7 @@ void test_vlseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_tu(
@@ -140,7 +140,7 @@ void test_vlseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_tu(
@@ -157,7 +157,7 @@ void test_vlseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_tum(
@@ -174,7 +174,7 @@ void test_vlseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_tum(
@@ -191,7 +191,7 @@ void test_vlseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_tum(
@@ -208,7 +208,7 @@ void test_vlseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_tum(
@@ -225,7 +225,7 @@ void test_vlseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_tum(
@@ -242,7 +242,7 @@ void test_vlseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_tum(
@@ -259,7 +259,7 @@ void test_vlseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_tum(
@@ -276,7 +276,7 @@ void test_vlseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_tum(
@@ -293,7 +293,7 @@ void test_vlseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_tum(
@@ -310,7 +310,7 @@ void test_vlseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_tumu(
@@ -327,7 +327,7 @@ void test_vlseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_tumu(
@@ -344,7 +344,7 @@ void test_vlseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_tumu(
@@ -361,7 +361,7 @@ void test_vlseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_tumu(
@@ -378,7 +378,7 @@ void test_vlseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_tumu(
@@ -395,7 +395,7 @@ void test_vlseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_tumu(
@@ -412,7 +412,7 @@ void test_vlseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_tumu(
@@ -429,7 +429,7 @@ void test_vlseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_tumu(
@@ -446,7 +446,7 @@ void test_vlseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_tumu(
@@ -463,6 +463,6 @@ void test_vlseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c
index bbdcec9e860c..8134d9077300 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_tu(
@@ -42,7 +42,7 @@ void test_vlseg4e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_tu(
@@ -61,7 +61,7 @@ void test_vlseg4e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_tu(
@@ -80,7 +80,7 @@ void test_vlseg4e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_tu(
@@ -99,7 +99,7 @@ void test_vlseg4e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_tu(
@@ -118,7 +118,7 @@ void test_vlseg4e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_tu(
@@ -137,7 +137,7 @@ void test_vlseg4e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_tu(
@@ -156,7 +156,7 @@ void test_vlseg4e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_tu(
@@ -175,7 +175,7 @@ void test_vlseg4e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_tum(
@@ -194,7 +194,7 @@ void test_vlseg4e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_tum(
@@ -213,7 +213,7 @@ void test_vlseg4e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_tum(
@@ -232,7 +232,7 @@ void test_vlseg4e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_tum(
@@ -251,7 +251,7 @@ void test_vlseg4e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_tum(
@@ -270,7 +270,7 @@ void test_vlseg4e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_tum(
@@ -289,7 +289,7 @@ void test_vlseg4e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_tum(
@@ -308,7 +308,7 @@ void test_vlseg4e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_tum(
@@ -327,7 +327,7 @@ void test_vlseg4e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_tum(
@@ -346,7 +346,7 @@ void test_vlseg4e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_tumu(
@@ -365,7 +365,7 @@ void test_vlseg4e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_tumu(
@@ -384,7 +384,7 @@ void test_vlseg4e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_tumu(
@@ -403,7 +403,7 @@ void test_vlseg4e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_tumu(
@@ -422,7 +422,7 @@ void test_vlseg4e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_tumu(
@@ -441,7 +441,7 @@ void test_vlseg4e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_tumu(
@@ -460,7 +460,7 @@ void test_vlseg4e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_tumu(
@@ -479,7 +479,7 @@ void test_vlseg4e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_tumu(
@@ -498,7 +498,7 @@ void test_vlseg4e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_tumu(
@@ -517,6 +517,6 @@ void test_vlseg4e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64.c
index 2d57886c0e94..262f09d3f1a5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) {
- return vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_tu(
@@ -38,7 +38,7 @@ void test_vlseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) {
- return vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_tu(
@@ -55,7 +55,7 @@ void test_vlseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) {
- return vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_tu(
@@ -72,7 +72,7 @@ void test_vlseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) {
- return vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_tu(
@@ -89,7 +89,7 @@ void test_vlseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) {
- return vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_tu(
@@ -106,7 +106,7 @@ void test_vlseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) {
- return vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_tum(
@@ -123,7 +123,7 @@ void test_vlseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) {
- return vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_tum(
@@ -140,7 +140,7 @@ void test_vlseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) {
- return vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_tum(
@@ -157,7 +157,7 @@ void test_vlseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) {
- return vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_tum(
@@ -174,7 +174,7 @@ void test_vlseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) {
- return vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_tum(
@@ -191,7 +191,7 @@ void test_vlseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) {
- return vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_tum(
@@ -208,7 +208,7 @@ void test_vlseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) {
- return vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_tumu(
@@ -225,7 +225,7 @@ void test_vlseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) {
- return vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_tumu(
@@ -242,7 +242,7 @@ void test_vlseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) {
- return vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_tumu(
@@ -259,7 +259,7 @@ void test_vlseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) {
- return vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_tumu(
@@ -276,7 +276,7 @@ void test_vlseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) {
- return vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_tumu(
@@ -293,7 +293,7 @@ void test_vlseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) {
- return vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_tumu(
@@ -310,6 +310,6 @@ void test_vlseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) {
- return vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
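For contrast with the masked policies, a sketch of the unmasked `_tu` (tail-undisturbed) form, following the f64m1 signature tested above; the function and variable names are hypothetical, and the same Clang/RVV toolchain assumptions apply.

#include <riscv_vector.h>

// Hypothetical helper: deinterleave four double-precision fields from
// `base`. _tu keeps the tail elements of the previous v0..v3 values,
// which are passed in as the maskedoff operands.
void load4_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2,
                    vfloat64m1_t *v3, const double *base, size_t vl) {
  __riscv_vlseg4e64_tu(v0, v1, v2, v3, *v0, *v1, *v2, *v3, base, vl);
}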
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c
index f306e422eb1a..ad9c9c324e48 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_tu(
@@ -42,7 +42,7 @@ void test_vlseg4e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_tu(
@@ -61,7 +61,7 @@ void test_vlseg4e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_tu(
@@ -80,7 +80,7 @@ void test_vlseg4e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_tu(
@@ -99,7 +99,7 @@ void test_vlseg4e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_tu(
@@ -118,7 +118,7 @@ void test_vlseg4e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_tum(
@@ -137,7 +137,7 @@ void test_vlseg4e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_tum(
@@ -156,7 +156,7 @@ void test_vlseg4e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_tum(
@@ -175,7 +175,7 @@ void test_vlseg4e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_tum(
@@ -194,7 +194,7 @@ void test_vlseg4e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_tum(
@@ -213,7 +213,7 @@ void test_vlseg4e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_tum(
@@ -232,7 +232,7 @@ void test_vlseg4e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_tumu(
@@ -251,7 +251,7 @@ void test_vlseg4e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_tumu(
@@ -270,7 +270,7 @@ void test_vlseg4e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_tumu(
@@ -289,7 +289,7 @@ void test_vlseg4e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_tumu(
@@ -308,7 +308,7 @@ void test_vlseg4e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_tumu(
@@ -327,7 +327,7 @@ void test_vlseg4e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_tumu(
@@ -346,6 +346,6 @@ void test_vlseg4e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8.c
index 7a1c992a3ed3..4e11a579f8ad 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8.c
@@ -20,7 +20,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_tu(
@@ -37,7 +37,7 @@ void test_vlseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_tu(
@@ -54,7 +54,7 @@ void test_vlseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_tu(
@@ -71,7 +71,7 @@ void test_vlseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_tu(
@@ -88,7 +88,7 @@ void test_vlseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_tu(
@@ -105,7 +105,7 @@ void test_vlseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_tu(
@@ -122,7 +122,7 @@ void test_vlseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_tu(
@@ -139,7 +139,7 @@ void test_vlseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_tu(
@@ -156,7 +156,7 @@ void test_vlseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_tu(
@@ -173,7 +173,7 @@ void test_vlseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_tum(
@@ -190,7 +190,7 @@ void test_vlseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_tum(
@@ -207,7 +207,7 @@ void test_vlseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_tum(
@@ -224,7 +224,7 @@ void test_vlseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_tum(
@@ -241,7 +241,7 @@ void test_vlseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_tum(
@@ -258,7 +258,7 @@ void test_vlseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_tum(
@@ -275,7 +275,7 @@ void test_vlseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_tum(
@@ -292,7 +292,7 @@ void test_vlseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_tum(
@@ -309,7 +309,7 @@ void test_vlseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_tum(
@@ -326,7 +326,7 @@ void test_vlseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_tum(
@@ -343,7 +343,7 @@ void test_vlseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_tumu(
@@ -360,7 +360,7 @@ void test_vlseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_tumu(
@@ -377,7 +377,7 @@ void test_vlseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_tumu(
@@ -394,7 +394,7 @@ void test_vlseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_tumu(
@@ -411,7 +411,7 @@ void test_vlseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_tumu(
@@ -428,7 +428,7 @@ void test_vlseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_tumu(
@@ -445,7 +445,7 @@ void test_vlseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_tumu(
@@ -462,7 +462,7 @@ void test_vlseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_tumu(
@@ -479,7 +479,7 @@ void test_vlseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_tumu(
@@ -496,7 +496,7 @@ void test_vlseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_tumu(
@@ -513,6 +513,6 @@ void test_vlseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+ return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}
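Finally, the masked `_tum` form for the e8 case, mirroring the u8m1 signature in the tests above (vbool8_t is the mask type paired with m1 at SEW=8); names are again hypothetical, under the same toolchain assumptions.

#include <riscv_vector.h>

// Hypothetical helper: masked segment load of four uint8 fields.
// _tum: tail undisturbed; masked-off elements take the maskedoff values,
// here the previous contents of v0..v3.
void load4_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
                    vuint8m1_t *v3, vbool8_t mask,
                    const uint8_t *base, size_t vl) {
  __riscv_vlseg4e8_tum(v0, v1, v2, v3, mask, *v0, *v1, *v2, *v3, base, vl);
}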
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c
index bc98017a407d..c938ac4295e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_tu(
@@ -42,7 +42,7 @@ void test_vlseg4e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_tu(
@@ -61,7 +61,7 @@ void test_vlseg4e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_tu(
@@ -80,7 +80,7 @@ void test_vlseg4e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_tu(
@@ -99,7 +99,7 @@ void test_vlseg4e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_tu(
@@ -118,7 +118,7 @@ void test_vlseg4e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_tu(
@@ -137,7 +137,7 @@ void test_vlseg4e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_tu(
@@ -156,7 +156,7 @@ void test_vlseg4e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_tu(
@@ -175,7 +175,7 @@ void test_vlseg4e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_tu(
@@ -194,7 +194,7 @@ void test_vlseg4e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_tum(
@@ -213,7 +213,7 @@ void test_vlseg4e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_tum(
@@ -232,7 +232,7 @@ void test_vlseg4e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_tum(
@@ -251,7 +251,7 @@ void test_vlseg4e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_tum(
@@ -270,7 +270,7 @@ void test_vlseg4e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_tum(
@@ -289,7 +289,7 @@ void test_vlseg4e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_tum(
@@ -308,7 +308,7 @@ void test_vlseg4e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_tum(
@@ -327,7 +327,7 @@ void test_vlseg4e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_tum(
@@ -346,7 +346,7 @@ void test_vlseg4e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_tum(
@@ -365,7 +365,7 @@ void test_vlseg4e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_tum(
@@ -384,7 +384,7 @@ void test_vlseg4e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_tumu(
@@ -403,7 +403,7 @@ void test_vlseg4e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_tumu(
@@ -422,7 +422,7 @@ void test_vlseg4e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_tumu(
@@ -441,7 +441,7 @@ void test_vlseg4e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_tumu(
@@ -460,7 +460,7 @@ void test_vlseg4e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_tumu(
@@ -479,7 +479,7 @@ void test_vlseg4e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_tumu(
@@ -498,7 +498,7 @@ void test_vlseg4e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_tumu(
@@ -517,7 +517,7 @@ void test_vlseg4e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_tumu(
@@ -536,7 +536,7 @@ void test_vlseg4e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_tumu(
@@ -555,7 +555,7 @@ void test_vlseg4e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_tumu(
@@ -574,6 +574,6 @@ void test_vlseg4e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+ return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16.c
index fbe47b92d9fa..9db594d79b3f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vlseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vlseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_tu(
@@ -80,7 +80,7 @@ void test_vlseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) {
- return vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_tu(
@@ -99,7 +99,7 @@ void test_vlseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) {
- return vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_tu(
@@ -118,7 +118,7 @@ void test_vlseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) {
- return vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_tu(
@@ -137,7 +137,7 @@ void test_vlseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_tu(
@@ -156,7 +156,7 @@ void test_vlseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_tu(
@@ -175,7 +175,7 @@ void test_vlseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_tum(
@@ -194,7 +194,7 @@ void test_vlseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_tum(
@@ -213,7 +213,7 @@ void test_vlseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_tum(
@@ -232,7 +232,7 @@ void test_vlseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_tum(
@@ -251,7 +251,7 @@ void test_vlseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) {
- return vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_tum(
@@ -270,7 +270,7 @@ void test_vlseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) {
- return vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_tum(
@@ -289,7 +289,7 @@ void test_vlseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) {
- return vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_tum(
@@ -308,7 +308,7 @@ void test_vlseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_tum(
@@ -327,7 +327,7 @@ void test_vlseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_tum(
@@ -346,7 +346,7 @@ void test_vlseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_tumu(
@@ -365,7 +365,7 @@ void test_vlseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_tumu(
@@ -384,7 +384,7 @@ void test_vlseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_tumu(
@@ -403,7 +403,7 @@ void test_vlseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_tumu(
@@ -422,7 +422,7 @@ void test_vlseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) {
- return vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_tumu(
@@ -441,7 +441,7 @@ void test_vlseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) {
- return vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_tumu(
@@ -460,7 +460,7 @@ void test_vlseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) {
- return vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_tumu(
@@ -479,7 +479,7 @@ void test_vlseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_tumu(
@@ -498,7 +498,7 @@ void test_vlseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_tumu(
@@ -517,6 +517,6 @@ void test_vlseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c
index e0b9b36c5af5..ee00f368bc6d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vlseg5e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vlseg5e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_tu(
@@ -88,7 +88,7 @@ void test_vlseg5e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_tu(
@@ -109,7 +109,7 @@ void test_vlseg5e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_tu(
@@ -130,7 +130,7 @@ void test_vlseg5e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_tu(
@@ -151,7 +151,7 @@ void test_vlseg5e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_tu(
@@ -172,7 +172,7 @@ void test_vlseg5e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_tu(
@@ -193,7 +193,7 @@ void test_vlseg5e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_tum(
@@ -214,7 +214,7 @@ void test_vlseg5e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_tum(
@@ -235,7 +235,7 @@ void test_vlseg5e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_tum(
@@ -256,7 +256,7 @@ void test_vlseg5e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_tum(
@@ -277,7 +277,7 @@ void test_vlseg5e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_tum(
@@ -298,7 +298,7 @@ void test_vlseg5e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_tum(
@@ -319,7 +319,7 @@ void test_vlseg5e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_tum(
@@ -340,7 +340,7 @@ void test_vlseg5e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_tum(
@@ -361,7 +361,7 @@ void test_vlseg5e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_tum(
@@ -382,7 +382,7 @@ void test_vlseg5e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_tumu(
@@ -403,7 +403,7 @@ void test_vlseg5e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_tumu(
@@ -424,7 +424,7 @@ void test_vlseg5e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_tumu(
@@ -445,7 +445,7 @@ void test_vlseg5e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_tumu(
@@ -466,7 +466,7 @@ void test_vlseg5e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_tumu(
@@ -487,7 +487,7 @@ void test_vlseg5e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_tumu(
@@ -508,7 +508,7 @@ void test_vlseg5e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_tumu(
@@ -529,7 +529,7 @@ void test_vlseg5e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_tumu(
@@ -550,7 +550,7 @@ void test_vlseg5e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_tumu(
@@ -571,6 +571,6 @@ void test_vlseg5e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32.c
index e9e989963110..7b322612e05b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) {
- return vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_tu(
@@ -42,7 +42,7 @@ void test_vlseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) {
- return vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_tu(
@@ -61,7 +61,7 @@ void test_vlseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) {
- return vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_tu(
@@ -80,7 +80,7 @@ void test_vlseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) {
- return vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_tu(
@@ -99,7 +99,7 @@ void test_vlseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) {
- return vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_tu(
@@ -118,7 +118,7 @@ void test_vlseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) {
- return vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_tum(
@@ -137,7 +137,7 @@ void test_vlseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) {
- return vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_tum(
@@ -156,7 +156,7 @@ void test_vlseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) {
- return vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_tum(
@@ -175,7 +175,7 @@ void test_vlseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) {
- return vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_tum(
@@ -194,7 +194,7 @@ void test_vlseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) {
- return vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_tum(
@@ -213,7 +213,7 @@ void test_vlseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) {
- return vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_tum(
@@ -232,7 +232,7 @@ void test_vlseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) {
- return vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_tumu(
@@ -251,7 +251,7 @@ void test_vlseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) {
- return vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_tumu(
@@ -270,7 +270,7 @@ void test_vlseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) {
- return vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_tumu(
@@ -289,7 +289,7 @@ void test_vlseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) {
- return vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_tumu(
@@ -308,7 +308,7 @@ void test_vlseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) {
- return vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_tumu(
@@ -327,7 +327,7 @@ void test_vlseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) {
- return vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_tumu(
@@ -346,6 +346,6 @@ void test_vlseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) {
- return vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c
index 4b1371ef9bf2..e828115e78ff 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_tu(
@@ -46,7 +46,7 @@ void test_vlseg5e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_tu(
@@ -67,7 +67,7 @@ void test_vlseg5e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_tu(
@@ -88,7 +88,7 @@ void test_vlseg5e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_tu(
@@ -109,7 +109,7 @@ void test_vlseg5e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_tu(
@@ -130,7 +130,7 @@ void test_vlseg5e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_tum(
@@ -151,7 +151,7 @@ void test_vlseg5e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_tum(
@@ -172,7 +172,7 @@ void test_vlseg5e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_tum(
@@ -193,7 +193,7 @@ void test_vlseg5e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_tum(
@@ -214,7 +214,7 @@ void test_vlseg5e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_tum(
@@ -235,7 +235,7 @@ void test_vlseg5e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_tum(
@@ -256,7 +256,7 @@ void test_vlseg5e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_tumu(
@@ -277,7 +277,7 @@ void test_vlseg5e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_tumu(
@@ -298,7 +298,7 @@ void test_vlseg5e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_tumu(
@@ -319,7 +319,7 @@ void test_vlseg5e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_tumu(
@@ -340,7 +340,7 @@ void test_vlseg5e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_tumu(
@@ -361,7 +361,7 @@ void test_vlseg5e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_tumu(
@@ -382,6 +382,6 @@ void test_vlseg5e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64.c
index 9a3617be654f..6a5bb8009218 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) {
- return vlseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_tu(
@@ -42,7 +42,7 @@ void test_vlseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) {
- return vlseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_tu(
@@ -61,7 +61,7 @@ void test_vlseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) {
- return vlseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_tum(
@@ -80,7 +80,7 @@ void test_vlseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) {
- return vlseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_tum(
@@ -99,7 +99,7 @@ void test_vlseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) {
- return vlseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_tum(
@@ -118,7 +118,7 @@ void test_vlseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) {
- return vlseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_tumu(
@@ -137,7 +137,7 @@ void test_vlseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) {
- return vlseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_tumu(
@@ -156,7 +156,7 @@ void test_vlseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) {
- return vlseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_tumu(
@@ -175,6 +175,6 @@ void test_vlseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) {
- return vlseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c
index 09c2662db7a0..7db4e23b3941 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e64ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_tu(
@@ -46,7 +46,7 @@ void test_vlseg5e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e64ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_tu(
@@ -67,7 +67,7 @@ void test_vlseg5e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e64ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_tum(
@@ -88,7 +88,7 @@ void test_vlseg5e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e64ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_tum(
@@ -109,7 +109,7 @@ void test_vlseg5e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e64ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_tum(
@@ -130,7 +130,7 @@ void test_vlseg5e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e64ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_tumu(
@@ -151,7 +151,7 @@ void test_vlseg5e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e64ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_tumu(
@@ -172,7 +172,7 @@ void test_vlseg5e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e64ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_tumu(
@@ -193,6 +193,6 @@ void test_vlseg5e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e64ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8.c
index 2002292c9a13..a91387c6c204 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8.c
@@ -22,7 +22,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_tu(
@@ -41,7 +41,7 @@ void test_vlseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_tu(
@@ -60,7 +60,7 @@ void test_vlseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_tu(
@@ -79,7 +79,7 @@ void test_vlseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_tu(
@@ -98,7 +98,7 @@ void test_vlseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_tu(
@@ -117,7 +117,7 @@ void test_vlseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_tu(
@@ -136,7 +136,7 @@ void test_vlseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_tu(
@@ -155,7 +155,7 @@ void test_vlseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_tum(
@@ -174,7 +174,7 @@ void test_vlseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_tum(
@@ -193,7 +193,7 @@ void test_vlseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_tum(
@@ -212,7 +212,7 @@ void test_vlseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_tum(
@@ -231,7 +231,7 @@ void test_vlseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_tum(
@@ -250,7 +250,7 @@ void test_vlseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_tum(
@@ -269,7 +269,7 @@ void test_vlseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_tum(
@@ -288,7 +288,7 @@ void test_vlseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_tum(
@@ -307,7 +307,7 @@ void test_vlseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_tumu(
@@ -326,7 +326,7 @@ void test_vlseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_tumu(
@@ -345,7 +345,7 @@ void test_vlseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_tumu(
@@ -364,7 +364,7 @@ void test_vlseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_tumu(
@@ -383,7 +383,7 @@ void test_vlseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_tumu(
@@ -402,7 +402,7 @@ void test_vlseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_tumu(
@@ -421,7 +421,7 @@ void test_vlseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_tumu(
@@ -440,7 +440,7 @@ void test_vlseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_tumu(
@@ -459,6 +459,6 @@ void test_vlseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+ return __riscv_vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c
index 5ad495544037..16ceefa5328c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_tu(
@@ -46,7 +46,7 @@ void test_vlseg5e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_tu(
@@ -67,7 +67,7 @@ void test_vlseg5e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_tu(
@@ -88,7 +88,7 @@ void test_vlseg5e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_tu(
@@ -109,7 +109,7 @@ void test_vlseg5e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_tu(
@@ -130,7 +130,7 @@ void test_vlseg5e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_tu(
@@ -151,7 +151,7 @@ void test_vlseg5e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_tu(
@@ -172,7 +172,7 @@ void test_vlseg5e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_tum(
@@ -193,7 +193,7 @@ void test_vlseg5e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_tum(
@@ -214,7 +214,7 @@ void test_vlseg5e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_tum(
@@ -235,7 +235,7 @@ void test_vlseg5e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_tum(
@@ -256,7 +256,7 @@ void test_vlseg5e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_tum(
@@ -277,7 +277,7 @@ void test_vlseg5e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_tum(
@@ -298,7 +298,7 @@ void test_vlseg5e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_tum(
@@ -319,7 +319,7 @@ void test_vlseg5e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_tum(
@@ -340,7 +340,7 @@ void test_vlseg5e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_tumu(
@@ -361,7 +361,7 @@ void test_vlseg5e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_tumu(
@@ -382,7 +382,7 @@ void test_vlseg5e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_tumu(
@@ -403,7 +403,7 @@ void test_vlseg5e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_tumu(
@@ -424,7 +424,7 @@ void test_vlseg5e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_tumu(
@@ -445,7 +445,7 @@ void test_vlseg5e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_tumu(
@@ -466,7 +466,7 @@ void test_vlseg5e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_tumu(
@@ -487,7 +487,7 @@ void test_vlseg5e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_tumu(
@@ -508,6 +508,6 @@ void test_vlseg5e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+ return __riscv_vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16.c
index 4eb808365de7..142174ea808f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vlseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vlseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_tu(
@@ -88,7 +88,7 @@ void test_vlseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) {
- return vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_tu(
@@ -109,7 +109,7 @@ void test_vlseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) {
- return vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_tu(
@@ -130,7 +130,7 @@ void test_vlseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) {
- return vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_tu(
@@ -151,7 +151,7 @@ void test_vlseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_tu(
@@ -172,7 +172,7 @@ void test_vlseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_tu(
@@ -193,7 +193,7 @@ void test_vlseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_tum(
@@ -214,7 +214,7 @@ void test_vlseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_tum(
@@ -235,7 +235,7 @@ void test_vlseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_tum(
@@ -256,7 +256,7 @@ void test_vlseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_tum(
@@ -277,7 +277,7 @@ void test_vlseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) {
- return vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_tum(
@@ -298,7 +298,7 @@ void test_vlseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) {
- return vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_tum(
@@ -319,7 +319,7 @@ void test_vlseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) {
- return vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_tum(
@@ -340,7 +340,7 @@ void test_vlseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_tum(
@@ -361,7 +361,7 @@ void test_vlseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_tum(
@@ -382,7 +382,7 @@ void test_vlseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_tumu(
@@ -403,7 +403,7 @@ void test_vlseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_tumu(
@@ -424,7 +424,7 @@ void test_vlseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_tumu(
@@ -445,7 +445,7 @@ void test_vlseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_tumu(
@@ -466,7 +466,7 @@ void test_vlseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) {
- return vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_tumu(
@@ -487,7 +487,7 @@ void test_vlseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) {
- return vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_tumu(
@@ -508,7 +508,7 @@ void test_vlseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) {
- return vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_tumu(
@@ -529,7 +529,7 @@ void test_vlseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_tumu(
@@ -550,7 +550,7 @@ void test_vlseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_tumu(
@@ -571,6 +571,6 @@ void test_vlseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c
index 2c7920601340..ca83f90afce0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vlseg6e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vlseg6e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_tu(
@@ -96,7 +96,7 @@ void test_vlseg6e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_tu(
@@ -119,7 +119,7 @@ void test_vlseg6e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_tu(
@@ -142,7 +142,7 @@ void test_vlseg6e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_tu(
@@ -165,7 +165,7 @@ void test_vlseg6e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_tu(
@@ -188,7 +188,7 @@ void test_vlseg6e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_tu(
@@ -211,7 +211,7 @@ void test_vlseg6e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_tum(
@@ -234,7 +234,7 @@ void test_vlseg6e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_tum(
@@ -257,7 +257,7 @@ void test_vlseg6e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_tum(
@@ -280,7 +280,7 @@ void test_vlseg6e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_tum(
@@ -303,7 +303,7 @@ void test_vlseg6e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_tum(
@@ -326,7 +326,7 @@ void test_vlseg6e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_tum(
@@ -349,7 +349,7 @@ void test_vlseg6e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_tum(
@@ -372,7 +372,7 @@ void test_vlseg6e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_tum(
@@ -395,7 +395,7 @@ void test_vlseg6e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_tum(
@@ -418,7 +418,7 @@ void test_vlseg6e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_tumu(
@@ -441,7 +441,7 @@ void test_vlseg6e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_tumu(
@@ -464,7 +464,7 @@ void test_vlseg6e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_tumu(
@@ -487,7 +487,7 @@ void test_vlseg6e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_tumu(
@@ -510,7 +510,7 @@ void test_vlseg6e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_tumu(
@@ -533,7 +533,7 @@ void test_vlseg6e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_tumu(
@@ -556,7 +556,7 @@ void test_vlseg6e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_tumu(
@@ -579,7 +579,7 @@ void test_vlseg6e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_tumu(
@@ -602,7 +602,7 @@ void test_vlseg6e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_tumu(
@@ -625,6 +625,6 @@ void test_vlseg6e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32.c
index e03f8f7257d8..ee97c834d534 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_tu(
@@ -46,7 +46,7 @@ void test_vlseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_tu(
@@ -67,7 +67,7 @@ void test_vlseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) {
- return vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_tu(
@@ -88,7 +88,7 @@ void test_vlseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) {
- return vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_tu(
@@ -109,7 +109,7 @@ void test_vlseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) {
- return vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_tu(
@@ -130,7 +130,7 @@ void test_vlseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) {
- return vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_tum(
@@ -151,7 +151,7 @@ void test_vlseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_tum(
@@ -172,7 +172,7 @@ void test_vlseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_tum(
@@ -193,7 +193,7 @@ void test_vlseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) {
- return vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_tum(
@@ -214,7 +214,7 @@ void test_vlseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) {
- return vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_tum(
@@ -235,7 +235,7 @@ void test_vlseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) {
- return vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_tum(
@@ -256,7 +256,7 @@ void test_vlseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) {
- return vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_tumu(
@@ -277,7 +277,7 @@ void test_vlseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_tumu(
@@ -298,7 +298,7 @@ void test_vlseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_tumu(
@@ -319,7 +319,7 @@ void test_vlseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) {
- return vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_tumu(
@@ -340,7 +340,7 @@ void test_vlseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) {
- return vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_tumu(
@@ -361,7 +361,7 @@ void test_vlseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) {
- return vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_tumu(
@@ -382,6 +382,6 @@ void test_vlseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) {
- return vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c
index 693fceaca4be..06dfe0a1c170 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_tu(
@@ -50,7 +50,7 @@ void test_vlseg6e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_tu(
@@ -73,7 +73,7 @@ void test_vlseg6e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_tu(
@@ -96,7 +96,7 @@ void test_vlseg6e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_tu(
@@ -119,7 +119,7 @@ void test_vlseg6e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_tu(
@@ -142,7 +142,7 @@ void test_vlseg6e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_tum(
@@ -165,7 +165,7 @@ void test_vlseg6e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_tum(
@@ -188,7 +188,7 @@ void test_vlseg6e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_tum(
@@ -211,7 +211,7 @@ void test_vlseg6e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_tum(
@@ -234,7 +234,7 @@ void test_vlseg6e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_tum(
@@ -257,7 +257,7 @@ void test_vlseg6e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_tum(
@@ -280,7 +280,7 @@ void test_vlseg6e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_tumu(
@@ -303,7 +303,7 @@ void test_vlseg6e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_tumu(
@@ -326,7 +326,7 @@ void test_vlseg6e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_tumu(
@@ -349,7 +349,7 @@ void test_vlseg6e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_tumu(
@@ -372,7 +372,7 @@ void test_vlseg6e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_tumu(
@@ -395,7 +395,7 @@ void test_vlseg6e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_tumu(
@@ -418,6 +418,6 @@ void test_vlseg6e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64.c
index 8cbf28cca0e7..0ca8df3e5f12 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) {
- return vlseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_tu(
@@ -46,7 +46,7 @@ void test_vlseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) {
- return vlseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_tu(
@@ -67,7 +67,7 @@ void test_vlseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) {
- return vlseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_tum(
@@ -88,7 +88,7 @@ void test_vlseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) {
- return vlseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_tum(
@@ -109,7 +109,7 @@ void test_vlseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) {
- return vlseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_tum(
@@ -130,7 +130,7 @@ void test_vlseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) {
- return vlseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_tumu(
@@ -151,7 +151,7 @@ void test_vlseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) {
- return vlseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_tumu(
@@ -172,7 +172,7 @@ void test_vlseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) {
- return vlseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_tumu(
@@ -193,6 +193,6 @@ void test_vlseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) {
- return vlseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c
index 100fe7067406..a033336d9477 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e64ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_tu(
@@ -50,7 +50,7 @@ void test_vlseg6e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e64ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_tu(
@@ -73,7 +73,7 @@ void test_vlseg6e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e64ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_tum(
@@ -96,7 +96,7 @@ void test_vlseg6e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e64ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_tum(
@@ -119,7 +119,7 @@ void test_vlseg6e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e64ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_tum(
@@ -142,7 +142,7 @@ void test_vlseg6e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e64ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_tumu(
@@ -165,7 +165,7 @@ void test_vlseg6e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e64ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_tumu(
@@ -188,7 +188,7 @@ void test_vlseg6e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e64ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_tumu(
@@ -211,6 +211,6 @@ void test_vlseg6e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e64ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8.c
index 7360fc6c6df1..812d38d8391c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8.c
@@ -24,7 +24,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_tu(
@@ -45,7 +45,7 @@ void test_vlseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_tu(
@@ -66,7 +66,7 @@ void test_vlseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_tu(
@@ -87,7 +87,7 @@ void test_vlseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_tu(
@@ -108,7 +108,7 @@ void test_vlseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_tu(
@@ -129,7 +129,7 @@ void test_vlseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_tu(
@@ -150,7 +150,7 @@ void test_vlseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_tu(
@@ -171,7 +171,7 @@ void test_vlseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_tum(
@@ -192,7 +192,7 @@ void test_vlseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_tum(
@@ -213,7 +213,7 @@ void test_vlseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_tum(
@@ -234,7 +234,7 @@ void test_vlseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_tum(
@@ -255,7 +255,7 @@ void test_vlseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_tum(
@@ -276,7 +276,7 @@ void test_vlseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_tum(
@@ -297,7 +297,7 @@ void test_vlseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_tum(
@@ -318,7 +318,7 @@ void test_vlseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_tum(
@@ -339,7 +339,7 @@ void test_vlseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_tumu(
@@ -360,7 +360,7 @@ void test_vlseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_tumu(
@@ -381,7 +381,7 @@ void test_vlseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_tumu(
@@ -402,7 +402,7 @@ void test_vlseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_tumu(
@@ -423,7 +423,7 @@ void test_vlseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_tumu(
@@ -444,7 +444,7 @@ void test_vlseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_tumu(
@@ -465,7 +465,7 @@ void test_vlseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_tumu(
@@ -486,7 +486,7 @@ void test_vlseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_tumu(
@@ -507,6 +507,6 @@ void test_vlseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+ return __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}
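The hunks above rename every overloaded policy variant mechanically. As a standalone illustration of the resulting API, here is a minimal sketch of a caller of the tail-undisturbed/mask-undisturbed form; the wrapper name load6_i8m1_tumu is hypothetical, but the intrinsic name, argument order, and types are exactly those exercised by the tests above:

#include <riscv_vector.h>

// Hypothetical wrapper: 6-field segment load of int8 elements at LMUL=1,
// tail-undisturbed/mask-undisturbed (_tumu). Inactive and tail elements
// keep the values of the maskedoff operands.
static void load6_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
                            vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5,
                            vbool8_t mask,
                            vint8m1_t off0, vint8m1_t off1, vint8m1_t off2,
                            vint8m1_t off3, vint8m1_t off4, vint8m1_t off5,
                            const int8_t *base, size_t vl) {
  // Previously spelled vlseg6e8_tumu(...); the overload now carries the
  // __riscv_ prefix required by the riscv-c-api-doc naming guideline.
  __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask,
                        off0, off1, off2, off3, off4, off5, base, vl);
}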
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c
index 7a7f0a9cea4f..650d421888b9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_tu(
@@ -50,7 +50,7 @@ void test_vlseg6e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_tu(
@@ -73,7 +73,7 @@ void test_vlseg6e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_tu(
@@ -96,7 +96,7 @@ void test_vlseg6e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_tu(
@@ -119,7 +119,7 @@ void test_vlseg6e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_tu(
@@ -142,7 +142,7 @@ void test_vlseg6e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_tu(
@@ -165,7 +165,7 @@ void test_vlseg6e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_tu(
@@ -188,7 +188,7 @@ void test_vlseg6e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_tum(
@@ -211,7 +211,7 @@ void test_vlseg6e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_tum(
@@ -234,7 +234,7 @@ void test_vlseg6e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_tum(
@@ -257,7 +257,7 @@ void test_vlseg6e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_tum(
@@ -280,7 +280,7 @@ void test_vlseg6e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_tum(
@@ -303,7 +303,7 @@ void test_vlseg6e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_tum(
@@ -326,7 +326,7 @@ void test_vlseg6e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_tum(
@@ -349,7 +349,7 @@ void test_vlseg6e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_tum(
@@ -372,7 +372,7 @@ void test_vlseg6e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_tumu(
@@ -395,7 +395,7 @@ void test_vlseg6e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_tumu(
@@ -418,7 +418,7 @@ void test_vlseg6e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_tumu(
@@ -441,7 +441,7 @@ void test_vlseg6e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_tumu(
@@ -464,7 +464,7 @@ void test_vlseg6e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_tumu(
@@ -487,7 +487,7 @@ void test_vlseg6e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_tumu(
@@ -510,7 +510,7 @@ void test_vlseg6e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_tumu(
@@ -533,7 +533,7 @@ void test_vlseg6e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_tumu(
@@ -556,6 +556,6 @@ void test_vlseg6e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+ return __riscv_vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
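The fault-only-first (ff) overloads renamed above differ from the plain segment loads in one respect visible in the tests: they take an extra size_t *new_vl out-parameter, through which the intrinsic reports how many elements were processed before a faulting access. A hedged sketch reusing the types from the tests (the wrapper name is invented):

#include <riscv_vector.h>

// Hypothetical wrapper around the renamed fault-only-first overload.
// *new_vl receives the element count completed before any faulting access;
// callers typically loop until the whole buffer has been consumed.
static void load6ff_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
                            vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5,
                            vuint8m1_t off0, vuint8m1_t off1, vuint8m1_t off2,
                            vuint8m1_t off3, vuint8m1_t off4, vuint8m1_t off5,
                            const uint8_t *base, size_t *new_vl, size_t vl) {
  __riscv_vlseg6e8ff_tu(v0, v1, v2, v3, v4, v5,
                        off0, off1, off2, off3, off4, off5,
                        base, new_vl, vl);
}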
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16.c
index 03bc3c6642ff..b33b3999f8ff 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vlseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vlseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_tu(
@@ -96,7 +96,7 @@ void test_vlseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) {
- return vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_tu(
@@ -119,7 +119,7 @@ void test_vlseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) {
- return vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_tu(
@@ -142,7 +142,7 @@ void test_vlseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) {
- return vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_tu(
@@ -165,7 +165,7 @@ void test_vlseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_tu(
@@ -188,7 +188,7 @@ void test_vlseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_tu(
@@ -211,7 +211,7 @@ void test_vlseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_tum(
@@ -234,7 +234,7 @@ void test_vlseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_tum(
@@ -257,7 +257,7 @@ void test_vlseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_tum(
@@ -280,7 +280,7 @@ void test_vlseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_tum(
@@ -303,7 +303,7 @@ void test_vlseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) {
- return vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_tum(
@@ -326,7 +326,7 @@ void test_vlseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) {
- return vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_tum(
@@ -349,7 +349,7 @@ void test_vlseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) {
- return vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_tum(
@@ -372,7 +372,7 @@ void test_vlseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_tum(
@@ -395,7 +395,7 @@ void test_vlseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_tum(
@@ -418,7 +418,7 @@ void test_vlseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_tumu(
@@ -441,7 +441,7 @@ void test_vlseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_tumu(
@@ -464,7 +464,7 @@ void test_vlseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_tumu(
@@ -487,7 +487,7 @@ void test_vlseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_tumu(
@@ -510,7 +510,7 @@ void test_vlseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) {
- return vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_tumu(
@@ -533,7 +533,7 @@ void test_vlseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) {
- return vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_tumu(
@@ -556,7 +556,7 @@ void test_vlseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) {
- return vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_tumu(
@@ -579,7 +579,7 @@ void test_vlseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_tumu(
@@ -602,7 +602,7 @@ void test_vlseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_tumu(
@@ -625,6 +625,6 @@ void test_vlseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
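One convention worth noting in the vlseg7e16 tests above: the mask type tracks the ratio SEW/LMUL, so 16-bit elements at LMUL=1 pair with vbool16_t, at mf2 with vbool32_t, and at mf4 with vbool64_t. A minimal sketch of the renamed seven-field _Float16 overload in its tail-undisturbed/mask form (the wrapper name is hypothetical):

#include <riscv_vector.h>

// Hypothetical wrapper: 7-field segment load of _Float16 at LMUL=mf4.
// SEW/LMUL = 16/(1/4) = 64, hence the vbool64_t mask in the _tum form.
static void load7_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1,
                             vfloat16mf4_t *v2, vfloat16mf4_t *v3,
                             vfloat16mf4_t *v4, vfloat16mf4_t *v5,
                             vfloat16mf4_t *v6, vbool64_t mask,
                             vfloat16mf4_t off0, vfloat16mf4_t off1,
                             vfloat16mf4_t off2, vfloat16mf4_t off3,
                             vfloat16mf4_t off4, vfloat16mf4_t off5,
                             vfloat16mf4_t off6,
                             const _Float16 *base, size_t vl) {
  __riscv_vlseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask,
                        off0, off1, off2, off3, off4, off5, off6, base, vl);
}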
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c
index 883b35c2b010..3bf299a94231 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vlseg7e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vlseg7e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_tu(
@@ -104,7 +104,7 @@ void test_vlseg7e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_tu(
@@ -129,7 +129,7 @@ void test_vlseg7e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_tu(
@@ -154,7 +154,7 @@ void test_vlseg7e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_tu(
@@ -179,7 +179,7 @@ void test_vlseg7e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_tu(
@@ -204,7 +204,7 @@ void test_vlseg7e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_tu(
@@ -229,7 +229,7 @@ void test_vlseg7e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_tum(
@@ -254,7 +254,7 @@ void test_vlseg7e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_tum(
@@ -279,7 +279,7 @@ void test_vlseg7e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_tum(
@@ -304,7 +304,7 @@ void test_vlseg7e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_tum(
@@ -329,7 +329,7 @@ void test_vlseg7e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_tum(
@@ -354,7 +354,7 @@ void test_vlseg7e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_tum(
@@ -379,7 +379,7 @@ void test_vlseg7e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_tum(
@@ -404,7 +404,7 @@ void test_vlseg7e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_tum(
@@ -429,7 +429,7 @@ void test_vlseg7e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_tum(
@@ -454,7 +454,7 @@ void test_vlseg7e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_tumu(
@@ -479,7 +479,7 @@ void test_vlseg7e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_tumu(
@@ -504,7 +504,7 @@ void test_vlseg7e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_tumu(
@@ -529,7 +529,7 @@ void test_vlseg7e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_tumu(
@@ -554,7 +554,7 @@ void test_vlseg7e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_tumu(
@@ -579,7 +579,7 @@ void test_vlseg7e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_tumu(
@@ -604,7 +604,7 @@ void test_vlseg7e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_tumu(
@@ -629,7 +629,7 @@ void test_vlseg7e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_tumu(
@@ -654,7 +654,7 @@ void test_vlseg7e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_tumu(
@@ -679,6 +679,6 @@ void test_vlseg7e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32.c
index fb57749fa917..d46c84bf4611 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_tu(
@@ -50,7 +50,7 @@ void test_vlseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_tu(
@@ -73,7 +73,7 @@ void test_vlseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) {
- return vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_tu(
@@ -96,7 +96,7 @@ void test_vlseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) {
- return vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_tu(
@@ -119,7 +119,7 @@ void test_vlseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) {
- return vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_tu(
@@ -142,7 +142,7 @@ void test_vlseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) {
- return vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_tum(
@@ -165,7 +165,7 @@ void test_vlseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_tum(
@@ -188,7 +188,7 @@ void test_vlseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_tum(
@@ -211,7 +211,7 @@ void test_vlseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) {
- return vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_tum(
@@ -234,7 +234,7 @@ void test_vlseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) {
- return vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_tum(
@@ -257,7 +257,7 @@ void test_vlseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) {
- return vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_tum(
@@ -280,7 +280,7 @@ void test_vlseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) {
- return vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_tumu(
@@ -303,7 +303,7 @@ void test_vlseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_tumu(
@@ -326,7 +326,7 @@ void test_vlseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_tumu(
@@ -349,7 +349,7 @@ void test_vlseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) {
- return vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_tumu(
@@ -372,7 +372,7 @@ void test_vlseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) {
- return vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_tumu(
@@ -395,7 +395,7 @@ void test_vlseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) {
- return vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_tumu(
@@ -418,6 +418,6 @@ void test_vlseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) {
- return vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c
index 92dcc3995203..72e082b48d83 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_tu(
@@ -54,7 +54,7 @@ void test_vlseg7e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_tu(
@@ -79,7 +79,7 @@ void test_vlseg7e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_tu(
@@ -104,7 +104,7 @@ void test_vlseg7e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_tu(
@@ -129,7 +129,7 @@ void test_vlseg7e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_tu(
@@ -154,7 +154,7 @@ void test_vlseg7e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_tum(
@@ -179,7 +179,7 @@ void test_vlseg7e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_tum(
@@ -204,7 +204,7 @@ void test_vlseg7e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_tum(
@@ -229,7 +229,7 @@ void test_vlseg7e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_tum(
@@ -254,7 +254,7 @@ void test_vlseg7e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_tum(
@@ -279,7 +279,7 @@ void test_vlseg7e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_tum(
@@ -304,7 +304,7 @@ void test_vlseg7e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_tumu(
@@ -329,7 +329,7 @@ void test_vlseg7e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_tumu(
@@ -354,7 +354,7 @@ void test_vlseg7e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_tumu(
@@ -379,7 +379,7 @@ void test_vlseg7e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_tumu(
@@ -404,7 +404,7 @@ void test_vlseg7e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_tumu(
@@ -429,7 +429,7 @@ void test_vlseg7e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_tumu(
@@ -454,6 +454,6 @@ void test_vlseg7e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64.c
index 830f1b425f61..8ec9f74b6d3b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) {
- return vlseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_tu(
@@ -50,7 +50,7 @@ void test_vlseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) {
- return vlseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_tu(
@@ -73,7 +73,7 @@ void test_vlseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) {
- return vlseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_tum(
@@ -96,7 +96,7 @@ void test_vlseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) {
- return vlseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_tum(
@@ -119,7 +119,7 @@ void test_vlseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) {
- return vlseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_tum(
@@ -142,7 +142,7 @@ void test_vlseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) {
- return vlseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_tumu(
@@ -165,7 +165,7 @@ void test_vlseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) {
- return vlseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_tumu(
@@ -188,7 +188,7 @@ void test_vlseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) {
- return vlseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_tumu(
@@ -211,6 +211,6 @@ void test_vlseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) {
- return vlseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c
index 5ad8fd938970..9f3074dbaff3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e64ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_tu(
@@ -54,7 +54,7 @@ void test_vlseg7e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e64ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_tu(
@@ -79,7 +79,7 @@ void test_vlseg7e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e64ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_tum(
@@ -104,7 +104,7 @@ void test_vlseg7e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e64ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_tum(
@@ -129,7 +129,7 @@ void test_vlseg7e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e64ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_tum(
@@ -154,7 +154,7 @@ void test_vlseg7e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e64ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_tumu(
@@ -179,7 +179,7 @@ void test_vlseg7e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_tumu(
@@ -204,7 +204,7 @@ void test_vlseg7e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_tumu(
@@ -229,6 +229,6 @@ void test_vlseg7e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8.c
index 972273c1a8db..7b84affea90f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8.c
@@ -26,7 +26,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_tu(
@@ -49,7 +49,7 @@ void test_vlseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_tu(
@@ -72,7 +72,7 @@ void test_vlseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_tu(
@@ -95,7 +95,7 @@ void test_vlseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_tu(
@@ -118,7 +118,7 @@ void test_vlseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_tu(
@@ -141,7 +141,7 @@ void test_vlseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_tu(
@@ -164,7 +164,7 @@ void test_vlseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_tu(
@@ -187,7 +187,7 @@ void test_vlseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_tum(
@@ -210,7 +210,7 @@ void test_vlseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_tum(
@@ -233,7 +233,7 @@ void test_vlseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_tum(
@@ -256,7 +256,7 @@ void test_vlseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_tum(
@@ -279,7 +279,7 @@ void test_vlseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_tum(
@@ -302,7 +302,7 @@ void test_vlseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_tum(
@@ -325,7 +325,7 @@ void test_vlseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_tum(
@@ -348,7 +348,7 @@ void test_vlseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_tum(
@@ -371,7 +371,7 @@ void test_vlseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_tumu(
@@ -394,7 +394,7 @@ void test_vlseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_tumu(
@@ -417,7 +417,7 @@ void test_vlseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_tumu(
@@ -440,7 +440,7 @@ void test_vlseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_tumu(
@@ -463,7 +463,7 @@ void test_vlseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_tumu(
@@ -486,7 +486,7 @@ void test_vlseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_tumu(
@@ -509,7 +509,7 @@ void test_vlseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_tumu(
@@ -532,7 +532,7 @@ void test_vlseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_tumu(
@@ -555,6 +555,6 @@ void test_vlseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+ return __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}
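
The `_tu`, `_tum`, and `_tumu` suffixes exercised throughout these tests are the RVV policy variants: `_tu` keeps tail elements undisturbed, `_tum` additionally takes a mask, and `_tumu` further leaves masked-off elements undisturbed. As a minimal sketch of the three call shapes after the rename, reusing the i8m1 signatures from vlseg7e8.c above (the helper and variable names are illustrative, not part of the patch):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical helper: the same 7-field segment load under each policy.
void demo_policies(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
                   vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5,
                   vint8m1_t *v6, vbool8_t mask,
                   vint8m1_t m0, vint8m1_t m1, vint8m1_t m2, vint8m1_t m3,
                   vint8m1_t m4, vint8m1_t m5, vint8m1_t m6,
                   const int8_t *base, size_t vl) {
  // Unmasked, tail-undisturbed.
  __riscv_vlseg7e8_tu(v0, v1, v2, v3, v4, v5, v6,
                      m0, m1, m2, m3, m4, m5, m6, base, vl);
  // Masked, tail-undisturbed.
  __riscv_vlseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask,
                       m0, m1, m2, m3, m4, m5, m6, base, vl);
  // Masked, tail-undisturbed and mask-undisturbed.
  __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask,
                        m0, m1, m2, m3, m4, m5, m6, base, vl);
}
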
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c
index 1ceaa688199b..0a8aa72087d0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_tu(
@@ -54,7 +54,7 @@ void test_vlseg7e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_tu(
@@ -79,7 +79,7 @@ void test_vlseg7e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_tu(
@@ -104,7 +104,7 @@ void test_vlseg7e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_tu(
@@ -129,7 +129,7 @@ void test_vlseg7e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_tu(
@@ -154,7 +154,7 @@ void test_vlseg7e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_tu(
@@ -179,7 +179,7 @@ void test_vlseg7e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_tu(
@@ -204,7 +204,7 @@ void test_vlseg7e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_tum(
@@ -229,7 +229,7 @@ void test_vlseg7e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_tum(
@@ -254,7 +254,7 @@ void test_vlseg7e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_tum(
@@ -279,7 +279,7 @@ void test_vlseg7e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_tum(
@@ -304,7 +304,7 @@ void test_vlseg7e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_tum(
@@ -329,7 +329,7 @@ void test_vlseg7e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_tum(
@@ -354,7 +354,7 @@ void test_vlseg7e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_tum(
@@ -379,7 +379,7 @@ void test_vlseg7e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_tum(
@@ -404,7 +404,7 @@ void test_vlseg7e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_tumu(
@@ -429,7 +429,7 @@ void test_vlseg7e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_tumu(
@@ -454,7 +454,7 @@ void test_vlseg7e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_tumu(
@@ -479,7 +479,7 @@ void test_vlseg7e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_tumu(
@@ -504,7 +504,7 @@ void test_vlseg7e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_tumu(
@@ -529,7 +529,7 @@ void test_vlseg7e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_tumu(
@@ -554,7 +554,7 @@ void test_vlseg7e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_tumu(
@@ -579,7 +579,7 @@ void test_vlseg7e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_tumu(
@@ -604,6 +604,6 @@ void test_vlseg7e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+ return __riscv_vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
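
The fault-only-first (`ff`) variants change spelling only as well; the trailing `size_t *new_vl` out-parameter, which receives the possibly reduced vector length after a faulting element, keeps its position. A migration sketch under the same assumptions (the caller name is hypothetical; the intrinsic signature follows the tests above):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical caller of the renamed fault-only-first segment load.
void demo_ff(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3,
             vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6,
             vint8m1_t m0, vint8m1_t m1, vint8m1_t m2, vint8m1_t m3,
             vint8m1_t m4, vint8m1_t m5, vint8m1_t m6,
             const int8_t *base, size_t *new_vl, size_t vl) {
  // Spelled vlseg7e8ff_tu(...) before this patch; the argument list is
  // unchanged, only the __riscv_ prefix is new.
  __riscv_vlseg7e8ff_tu(v0, v1, v2, v3, v4, v5, v6,
                        m0, m1, m2, m3, m4, m5, m6, base, new_vl, vl);
  // *new_vl now holds the vector length actually processed.
}
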
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16.c
index a7c00593b16e..b9937799727b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vlseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vlseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_tu(
@@ -104,7 +104,7 @@ void test_vlseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_tu(
@@ -129,7 +129,7 @@ void test_vlseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_tu(
@@ -154,7 +154,7 @@ void test_vlseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_tu(
@@ -179,7 +179,7 @@ void test_vlseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_tu(
@@ -204,7 +204,7 @@ void test_vlseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_tu(
@@ -229,7 +229,7 @@ void test_vlseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_tum(
@@ -254,7 +254,7 @@ void test_vlseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_tum(
@@ -279,7 +279,7 @@ void test_vlseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_tum(
@@ -304,7 +304,7 @@ void test_vlseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_tum(
@@ -329,7 +329,7 @@ void test_vlseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_tum(
@@ -354,7 +354,7 @@ void test_vlseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_tum(
@@ -379,7 +379,7 @@ void test_vlseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_tum(
@@ -404,7 +404,7 @@ void test_vlseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_tum(
@@ -429,7 +429,7 @@ void test_vlseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_tum(
@@ -454,7 +454,7 @@ void test_vlseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_tumu(
@@ -479,7 +479,7 @@ void test_vlseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_tumu(
@@ -504,7 +504,7 @@ void test_vlseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_tumu(
@@ -529,7 +529,7 @@ void test_vlseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_tumu(
@@ -554,7 +554,7 @@ void test_vlseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_tumu(
@@ -579,7 +579,7 @@ void test_vlseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_tumu(
@@ -604,7 +604,7 @@ void test_vlseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_tumu(
@@ -629,7 +629,7 @@ void test_vlseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_tumu(
@@ -654,7 +654,7 @@ void test_vlseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_tumu(
@@ -679,6 +679,6 @@ void test_vlseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
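For reference, a minimal caller sketch (illustrative only, not part of this patch; the function name demo_vlseg8e16_tu is hypothetical, and the signature simply mirrors the tests above): for the overloaded policy intrinsics, only the __riscv_ prefix changes — the argument list of the tail-undisturbed (_tu) segment load is untouched.

#include <riscv_vector.h>

// Illustrative sketch: unit-stride load of 8 fields of 16-bit elements,
// tail-undisturbed. Tail elements past vl keep the values of the
// corresponding maskedoff operands (m0..m7).
void demo_vlseg8e16_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1,
                       vfloat16mf4_t *v2, vfloat16mf4_t *v3,
                       vfloat16mf4_t *v4, vfloat16mf4_t *v5,
                       vfloat16mf4_t *v6, vfloat16mf4_t *v7,
                       vfloat16mf4_t m0, vfloat16mf4_t m1,
                       vfloat16mf4_t m2, vfloat16mf4_t m3,
                       vfloat16mf4_t m4, vfloat16mf4_t m5,
                       vfloat16mf4_t m6, vfloat16mf4_t m7,
                       const _Float16 *base, size_t vl) {
  // Before this patch: vlseg8e16_tu(v0, ..., m0, ..., base, vl);
  __riscv_vlseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7,
                       m0, m1, m2, m3, m4, m5, m6, m7, base, vl);
}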
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c
index f0e8ad633106..7299ab16ac9c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c
@@ -31,7 +31,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_tu(
@@ -58,7 +58,7 @@ void test_vlseg8e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_tu(
@@ -85,7 +85,7 @@ void test_vlseg8e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_tu(
@@ -112,7 +112,7 @@ void test_vlseg8e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_tu(
@@ -139,7 +139,7 @@ void test_vlseg8e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_tu(
@@ -166,7 +166,7 @@ void test_vlseg8e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_tu(
@@ -193,7 +193,7 @@ void test_vlseg8e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_tu(
@@ -220,7 +220,7 @@ void test_vlseg8e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_tu(
@@ -247,7 +247,7 @@ void test_vlseg8e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_tum(
@@ -274,7 +274,7 @@ void test_vlseg8e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_tum(
@@ -301,7 +301,7 @@ void test_vlseg8e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_tum(
@@ -328,7 +328,7 @@ void test_vlseg8e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_tum(
@@ -355,7 +355,7 @@ void test_vlseg8e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_tum(
@@ -382,7 +382,7 @@ void test_vlseg8e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_tum(
@@ -409,7 +409,7 @@ void test_vlseg8e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_tum(
@@ -436,7 +436,7 @@ void test_vlseg8e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_tum(
@@ -463,7 +463,7 @@ void test_vlseg8e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_tum(
@@ -490,7 +490,7 @@ void test_vlseg8e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_tumu(
@@ -517,7 +517,7 @@ void test_vlseg8e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_tumu(
@@ -544,7 +544,7 @@ void test_vlseg8e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_tumu(
@@ -571,7 +571,7 @@ void test_vlseg8e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_tumu(
@@ -598,7 +598,7 @@ void test_vlseg8e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_tumu(
@@ -625,7 +625,7 @@ void test_vlseg8e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_tumu(
@@ -652,7 +652,7 @@ void test_vlseg8e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_tumu(
@@ -679,7 +679,7 @@ void test_vlseg8e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_tumu(
@@ -706,7 +706,7 @@ void test_vlseg8e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_tumu(
@@ -733,6 +733,6 @@ void test_vlseg8e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
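The same rename applies to the fault-only-first (ff) forms, which take an extra size_t *new_vl out-parameter. A hypothetical masked, tail-undisturbed/mask-undisturbed (tumu) caller, again mirroring the test signatures above (illustrative only):

#include <riscv_vector.h>

// Illustrative sketch: fault-only-first segment load. If an element past
// the first faults, the load completes partially and the element count
// actually loaded is written to *new_vl. Under tumu, inactive (masked-off)
// and tail elements are left as the maskedoff operands (m0..m7).
void demo_vlseg8e16ff_tumu(vint16m1_t *v0, vint16m1_t *v1,
                           vint16m1_t *v2, vint16m1_t *v3,
                           vint16m1_t *v4, vint16m1_t *v5,
                           vint16m1_t *v6, vint16m1_t *v7,
                           vbool16_t mask,
                           vint16m1_t m0, vint16m1_t m1,
                           vint16m1_t m2, vint16m1_t m3,
                           vint16m1_t m4, vint16m1_t m5,
                           vint16m1_t m6, vint16m1_t m7,
                           const int16_t *base, size_t *new_vl, size_t vl) {
  __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask,
                           m0, m1, m2, m3, m4, m5, m6, m7,
                           base, new_vl, vl);
}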
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32.c
index 29d1b92cdc21..8c3ea88b9722 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_tu(
@@ -54,7 +54,7 @@ void test_vlseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_tu(
@@ -79,7 +79,7 @@ void test_vlseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) {
- return vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_tu(
@@ -104,7 +104,7 @@ void test_vlseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) {
- return vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_tu(
@@ -129,7 +129,7 @@ void test_vlseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) {
- return vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_tu(
@@ -154,7 +154,7 @@ void test_vlseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) {
- return vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_tum(
@@ -179,7 +179,7 @@ void test_vlseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_tum(
@@ -204,7 +204,7 @@ void test_vlseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_tum(
@@ -229,7 +229,7 @@ void test_vlseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) {
- return vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_tum(
@@ -254,7 +254,7 @@ void test_vlseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) {
- return vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_tum(
@@ -279,7 +279,7 @@ void test_vlseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) {
- return vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_tum(
@@ -304,7 +304,7 @@ void test_vlseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) {
- return vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_tumu(
@@ -329,7 +329,7 @@ void test_vlseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_tumu(
@@ -354,7 +354,7 @@ void test_vlseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_tumu(
@@ -379,7 +379,7 @@ void test_vlseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) {
- return vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_tumu(
@@ -404,7 +404,7 @@ void test_vlseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) {
- return vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_tumu(
@@ -429,7 +429,7 @@ void test_vlseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) {
- return vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_tumu(
@@ -454,6 +454,6 @@ void test_vlseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) {
- return vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c
index 285bb1e6ccb2..5b5f3b2f475e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c
@@ -31,7 +31,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_tu(
@@ -58,7 +58,7 @@ void test_vlseg8e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_tu(
@@ -85,7 +85,7 @@ void test_vlseg8e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_tu(
@@ -112,7 +112,7 @@ void test_vlseg8e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_tu(
@@ -139,7 +139,7 @@ void test_vlseg8e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_tu(
@@ -166,7 +166,7 @@ void test_vlseg8e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_tum(
@@ -193,7 +193,7 @@ void test_vlseg8e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_tum(
@@ -220,7 +220,7 @@ void test_vlseg8e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_tum(
@@ -247,7 +247,7 @@ void test_vlseg8e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_tum(
@@ -274,7 +274,7 @@ void test_vlseg8e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_tum(
@@ -301,7 +301,7 @@ void test_vlseg8e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_tum(
@@ -328,7 +328,7 @@ void test_vlseg8e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_tumu(
@@ -355,7 +355,7 @@ void test_vlseg8e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_tumu(
@@ -382,7 +382,7 @@ void test_vlseg8e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_tumu(
@@ -409,7 +409,7 @@ void test_vlseg8e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_tumu(
@@ -436,7 +436,7 @@ void test_vlseg8e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_tumu(
@@ -463,7 +463,7 @@ void test_vlseg8e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_tumu(
@@ -490,6 +490,6 @@ void test_vlseg8e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64.c
index e34ef084bd15..738093e0b554 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) {
- return vlseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_tu(
@@ -54,7 +54,7 @@ void test_vlseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) {
- return vlseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_tu(
@@ -79,7 +79,7 @@ void test_vlseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) {
- return vlseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_tum(
@@ -104,7 +104,7 @@ void test_vlseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) {
- return vlseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_tum(
@@ -129,7 +129,7 @@ void test_vlseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) {
- return vlseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_tum(
@@ -154,7 +154,7 @@ void test_vlseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) {
- return vlseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_tumu(
@@ -179,7 +179,7 @@ void test_vlseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) {
- return vlseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_tumu(
@@ -204,7 +204,7 @@ void test_vlseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) {
- return vlseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_tumu(
@@ -229,6 +229,6 @@ void test_vlseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) {
- return vlseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c
index b9747b67110f..79407002c917 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c
@@ -31,7 +31,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e64ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_tu(
@@ -58,7 +58,7 @@ void test_vlseg8e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e64ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_tu(
@@ -85,7 +85,7 @@ void test_vlseg8e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e64ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_tum(
@@ -112,7 +112,7 @@ void test_vlseg8e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e64ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_tum(
@@ -139,7 +139,7 @@ void test_vlseg8e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e64ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_tum(
@@ -166,7 +166,7 @@ void test_vlseg8e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e64ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_tumu(
@@ -193,7 +193,7 @@ void test_vlseg8e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_tumu(
@@ -220,7 +220,7 @@ void test_vlseg8e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_tumu(
@@ -247,6 +247,6 @@ void test_vlseg8e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8.c
index 0bd9cd79c195..7665caf21ebd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8.c
@@ -28,7 +28,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_tu(
@@ -53,7 +53,7 @@ void test_vlseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_tu(
@@ -78,7 +78,7 @@ void test_vlseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_tu(
@@ -103,7 +103,7 @@ void test_vlseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_tu(
@@ -128,7 +128,7 @@ void test_vlseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_tu(
@@ -153,7 +153,7 @@ void test_vlseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_tu(
@@ -178,7 +178,7 @@ void test_vlseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_tu(
@@ -203,7 +203,7 @@ void test_vlseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_tum(
@@ -228,7 +228,7 @@ void test_vlseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_tum(
@@ -253,7 +253,7 @@ void test_vlseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_tum(
@@ -278,7 +278,7 @@ void test_vlseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_tum(
@@ -303,7 +303,7 @@ void test_vlseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_tum(
@@ -328,7 +328,7 @@ void test_vlseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_tum(
@@ -353,7 +353,7 @@ void test_vlseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_tum(
@@ -378,7 +378,7 @@ void test_vlseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_tum(
@@ -403,7 +403,7 @@ void test_vlseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_tumu(
@@ -428,7 +428,7 @@ void test_vlseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_tumu(
@@ -453,7 +453,7 @@ void test_vlseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_tumu(
@@ -478,7 +478,7 @@ void test_vlseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_tumu(
@@ -503,7 +503,7 @@ void test_vlseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_tumu(
@@ -528,7 +528,7 @@ void test_vlseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_tumu(
@@ -553,7 +553,7 @@ void test_vlseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_tumu(
@@ -578,7 +578,7 @@ void test_vlseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_tumu(
@@ -603,6 +603,6 @@ void test_vlseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+ return __riscv_vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c
index 7165c6b50501..a1dbe24ae6c9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c
@@ -31,7 +31,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_tu(
@@ -58,7 +58,7 @@ void test_vlseg8e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_tu(
@@ -85,7 +85,7 @@ void test_vlseg8e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_tu(
@@ -112,7 +112,7 @@ void test_vlseg8e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_tu(
@@ -139,7 +139,7 @@ void test_vlseg8e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_tu(
@@ -166,7 +166,7 @@ void test_vlseg8e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_tu(
@@ -193,7 +193,7 @@ void test_vlseg8e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_tu(
@@ -220,7 +220,7 @@ void test_vlseg8e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_tum(
@@ -247,7 +247,7 @@ void test_vlseg8e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_tum(
@@ -274,7 +274,7 @@ void test_vlseg8e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_tum(
@@ -301,7 +301,7 @@ void test_vlseg8e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_tum(
@@ -328,7 +328,7 @@ void test_vlseg8e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_tum(
@@ -355,7 +355,7 @@ void test_vlseg8e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_tum(
@@ -382,7 +382,7 @@ void test_vlseg8e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_tum(
@@ -409,7 +409,7 @@ void test_vlseg8e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_tum(
@@ -436,7 +436,7 @@ void test_vlseg8e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_tumu(
@@ -463,7 +463,7 @@ void test_vlseg8e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_tumu(
@@ -490,7 +490,7 @@ void test_vlseg8e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_tumu(
@@ -517,7 +517,7 @@ void test_vlseg8e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_tumu(
@@ -544,7 +544,7 @@ void test_vlseg8e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_tumu(
@@ -571,7 +571,7 @@ void test_vlseg8e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_tumu(
@@ -598,7 +598,7 @@ void test_vlseg8e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_tumu(
@@ -625,7 +625,7 @@ void test_vlseg8e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_tumu(
@@ -652,6 +652,6 @@ void test_vlseg8e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+ return __riscv_vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c
index 95b95a7e9f90..6d7efd0cb3c7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vlsseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vlsseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vlsseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_tu(
@@ -69,7 +69,7 @@ void test_vlsseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_tu(
@@ -82,7 +82,7 @@ void test_vlsseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_tu(
@@ -95,7 +95,7 @@ void test_vlsseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_tu(
@@ -108,7 +108,7 @@ void test_vlsseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_tu(
@@ -121,7 +121,7 @@ void test_vlsseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_tu(
@@ -134,7 +134,7 @@ void test_vlsseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_tu(
@@ -147,7 +147,7 @@ void test_vlsseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_tu(
@@ -160,7 +160,7 @@ void test_vlsseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_tu(
@@ -173,7 +173,7 @@ void test_vlsseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_tu(
@@ -186,7 +186,7 @@ void test_vlsseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_tu(
@@ -199,7 +199,7 @@ void test_vlsseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_tum(
@@ -212,7 +212,7 @@ void test_vlsseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_tum(
@@ -225,7 +225,7 @@ void test_vlsseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_tum(
@@ -238,7 +238,7 @@ void test_vlsseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_tum(
@@ -251,7 +251,7 @@ void test_vlsseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_tum(
@@ -264,7 +264,7 @@ void test_vlsseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_tum(
@@ -277,7 +277,7 @@ void test_vlsseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_tum(
@@ -290,7 +290,7 @@ void test_vlsseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_tum(
@@ -303,7 +303,7 @@ void test_vlsseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_tum(
@@ -316,7 +316,7 @@ void test_vlsseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_tum(
@@ -329,7 +329,7 @@ void test_vlsseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_tum(
@@ -342,7 +342,7 @@ void test_vlsseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_tum(
@@ -355,7 +355,7 @@ void test_vlsseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_tum(
@@ -368,7 +368,7 @@ void test_vlsseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_tum(
@@ -381,7 +381,7 @@ void test_vlsseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_tum(
@@ -394,7 +394,7 @@ void test_vlsseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_tumu(
@@ -407,7 +407,7 @@ void test_vlsseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_tumu(
@@ -420,7 +420,7 @@ void test_vlsseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_tumu(
@@ -433,7 +433,7 @@ void test_vlsseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_tumu(
@@ -446,7 +446,7 @@ void test_vlsseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_tumu(
@@ -459,7 +459,7 @@ void test_vlsseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_tumu(
@@ -472,7 +472,7 @@ void test_vlsseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_tumu(
@@ -485,7 +485,7 @@ void test_vlsseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_tumu(
@@ -498,7 +498,7 @@ void test_vlsseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_tumu(
@@ -511,7 +511,7 @@ void test_vlsseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_tumu(
@@ -524,7 +524,7 @@ void test_vlsseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_tumu(
@@ -537,7 +537,7 @@ void test_vlsseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_tumu(
@@ -550,7 +550,7 @@ void test_vlsseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_tumu(
@@ -563,7 +563,7 @@ void test_vlsseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_tumu(
@@ -576,7 +576,7 @@ void test_vlsseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_tumu(
@@ -589,6 +589,6 @@ void test_vlsseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c
index 25833260e172..8d931e397c3b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_tu(
@@ -30,7 +30,7 @@ void test_vlsseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_tu(
@@ -43,7 +43,7 @@ void test_vlsseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_tu(
@@ -56,7 +56,7 @@ void test_vlsseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_tu(
@@ -69,7 +69,7 @@ void test_vlsseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_tu(
@@ -82,7 +82,7 @@ void test_vlsseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_tu(
@@ -95,7 +95,7 @@ void test_vlsseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_tu(
@@ -108,7 +108,7 @@ void test_vlsseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tu(
@@ -121,7 +121,7 @@ void test_vlsseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_tu(
@@ -134,7 +134,7 @@ void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_tu(
@@ -147,7 +147,7 @@ void test_vlsseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_tu(
@@ -160,7 +160,7 @@ void test_vlsseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_tum(
@@ -173,7 +173,7 @@ void test_vlsseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_tum(
@@ -186,7 +186,7 @@ void test_vlsseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_tum(
@@ -199,7 +199,7 @@ void test_vlsseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_tum(
@@ -212,7 +212,7 @@ void test_vlsseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_tum(
@@ -225,7 +225,7 @@ void test_vlsseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_tum(
@@ -238,7 +238,7 @@ void test_vlsseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_tum(
@@ -251,7 +251,7 @@ void test_vlsseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_tum(
@@ -264,7 +264,7 @@ void test_vlsseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tum(
@@ -277,7 +277,7 @@ void test_vlsseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_tum(
@@ -290,7 +290,7 @@ void test_vlsseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_tum(
@@ -303,7 +303,7 @@ void test_vlsseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_tum(
@@ -316,7 +316,7 @@ void test_vlsseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_tumu(
@@ -329,7 +329,7 @@ void test_vlsseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_tumu(
@@ -342,7 +342,7 @@ void test_vlsseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_tumu(
@@ -355,7 +355,7 @@ void test_vlsseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_tumu(
@@ -368,7 +368,7 @@ void test_vlsseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_tumu(
@@ -381,7 +381,7 @@ void test_vlsseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_tumu(
@@ -394,7 +394,7 @@ void test_vlsseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_tumu(
@@ -407,7 +407,7 @@ void test_vlsseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_tumu(
@@ -420,7 +420,7 @@ void test_vlsseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tumu(
@@ -433,7 +433,7 @@ void test_vlsseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_tumu(
@@ -446,7 +446,7 @@ void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_tumu(
@@ -459,7 +459,7 @@ void test_vlsseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_tumu(
@@ -472,6 +472,6 @@ void test_vlsseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c
index 03bcbd03fc16..51bea93b514a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_tu(
@@ -30,7 +30,7 @@ void test_vlsseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_tu(
@@ -43,7 +43,7 @@ void test_vlsseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_tu(
@@ -56,7 +56,7 @@ void test_vlsseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_tu(
@@ -69,7 +69,7 @@ void test_vlsseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_tu(
@@ -82,7 +82,7 @@ void test_vlsseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_tu(
@@ -95,7 +95,7 @@ void test_vlsseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_tu(
@@ -108,7 +108,7 @@ void test_vlsseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_tu(
@@ -121,7 +121,7 @@ void test_vlsseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_tum(
@@ -134,7 +134,7 @@ void test_vlsseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_tum(
@@ -147,7 +147,7 @@ void test_vlsseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_tum(
@@ -160,7 +160,7 @@ void test_vlsseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_tum(
@@ -173,7 +173,7 @@ void test_vlsseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_tum(
@@ -186,7 +186,7 @@ void test_vlsseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_tum(
@@ -199,7 +199,7 @@ void test_vlsseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_tum(
@@ -212,7 +212,7 @@ void test_vlsseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_tum(
@@ -225,7 +225,7 @@ void test_vlsseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_tum(
@@ -238,7 +238,7 @@ void test_vlsseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_tumu(
@@ -251,7 +251,7 @@ void test_vlsseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_tumu(
@@ -264,7 +264,7 @@ void test_vlsseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_tumu(
@@ -277,7 +277,7 @@ void test_vlsseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_tumu(
@@ -290,7 +290,7 @@ void test_vlsseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_tumu(
@@ -303,7 +303,7 @@ void test_vlsseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_tumu(
@@ -316,7 +316,7 @@ void test_vlsseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_tumu(
@@ -329,7 +329,7 @@ void test_vlsseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_tumu(
@@ -342,7 +342,7 @@ void test_vlsseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_tumu(
@@ -355,6 +355,6 @@ void test_vlsseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c
index 2ce68c4f4cee..40821bffeb64 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c
@@ -16,7 +16,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_tu(
@@ -29,7 +29,7 @@ void test_vlsseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_tu(
@@ -42,7 +42,7 @@ void test_vlsseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_tu(
@@ -55,7 +55,7 @@ void test_vlsseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t masked
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_tu(
@@ -68,7 +68,7 @@ void test_vlsseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_tu(
@@ -81,7 +81,7 @@ void test_vlsseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_tu(
@@ -94,7 +94,7 @@ void test_vlsseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_tu(
@@ -107,7 +107,7 @@ void test_vlsseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_tu(
@@ -120,7 +120,7 @@ void test_vlsseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_tu(
@@ -133,7 +133,7 @@ void test_vlsseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_tu(
@@ -146,7 +146,7 @@ void test_vlsseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_tu(
@@ -159,7 +159,7 @@ void test_vlsseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_tum(
@@ -172,7 +172,7 @@ void test_vlsseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_tum(
@@ -185,7 +185,7 @@ void test_vlsseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_tum(
@@ -198,7 +198,7 @@ void test_vlsseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_tum(
@@ -211,7 +211,7 @@ void test_vlsseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_tum(
@@ -224,7 +224,7 @@ void test_vlsseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_tum(
@@ -237,7 +237,7 @@ void test_vlsseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_tum(
@@ -250,7 +250,7 @@ void test_vlsseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_tum(
@@ -263,7 +263,7 @@ void test_vlsseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_tum(
@@ -276,7 +276,7 @@ void test_vlsseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_tum(
@@ -289,7 +289,7 @@ void test_vlsseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_tum(
@@ -302,7 +302,7 @@ void test_vlsseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_tum(
@@ -315,7 +315,7 @@ void test_vlsseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_tumu(
@@ -328,7 +328,7 @@ void test_vlsseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_tumu(
@@ -341,7 +341,7 @@ void test_vlsseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_tumu(
@@ -354,7 +354,7 @@ void test_vlsseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_tumu(
@@ -367,7 +367,7 @@ void test_vlsseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_tumu(
@@ -380,7 +380,7 @@ void test_vlsseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_tumu(
@@ -393,7 +393,7 @@ void test_vlsseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_tumu(
@@ -406,7 +406,7 @@ void test_vlsseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_tumu(
@@ -419,7 +419,7 @@ void test_vlsseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_tumu(
@@ -432,7 +432,7 @@ void test_vlsseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_tumu(
@@ -445,7 +445,7 @@ void test_vlsseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_tumu(
@@ -458,7 +458,7 @@ void test_vlsseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_tumu(
@@ -471,6 +471,6 @@ void test_vlsseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+ return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c
index 0bc328444e8a..4de39d14927e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vlsseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vlsseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vlsseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_tu(
@@ -79,7 +79,7 @@ void test_vlsseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_tu(
@@ -94,7 +94,7 @@ void test_vlsseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_tu(
@@ -109,7 +109,7 @@ void test_vlsseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_tu(
@@ -124,7 +124,7 @@ void test_vlsseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_tu(
@@ -139,7 +139,7 @@ void test_vlsseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_tu(
@@ -154,7 +154,7 @@ void test_vlsseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_tu(
@@ -169,7 +169,7 @@ void test_vlsseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_tu(
@@ -184,7 +184,7 @@ void test_vlsseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_tum(
@@ -199,7 +199,7 @@ void test_vlsseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_tum(
@@ -214,7 +214,7 @@ void test_vlsseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_tum(
@@ -229,7 +229,7 @@ void test_vlsseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_tum(
@@ -244,7 +244,7 @@ void test_vlsseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_tum(
@@ -259,7 +259,7 @@ void test_vlsseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_tum(
@@ -274,7 +274,7 @@ void test_vlsseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_tum(
@@ -289,7 +289,7 @@ void test_vlsseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_tum(
@@ -304,7 +304,7 @@ void test_vlsseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_tum(
@@ -319,7 +319,7 @@ void test_vlsseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_tum(
@@ -334,7 +334,7 @@ void test_vlsseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_tum(
@@ -349,7 +349,7 @@ void test_vlsseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_tum(
@@ -364,7 +364,7 @@ void test_vlsseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_tumu(
@@ -379,7 +379,7 @@ void test_vlsseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_tumu(
@@ -394,7 +394,7 @@ void test_vlsseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_tumu(
@@ -409,7 +409,7 @@ void test_vlsseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_tumu(
@@ -424,7 +424,7 @@ void test_vlsseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_tumu(
@@ -439,7 +439,7 @@ void test_vlsseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_tumu(
@@ -454,7 +454,7 @@ void test_vlsseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_tumu(
@@ -469,7 +469,7 @@ void test_vlsseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_tumu(
@@ -484,7 +484,7 @@ void test_vlsseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_tumu(
@@ -499,7 +499,7 @@ void test_vlsseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_tumu(
@@ -514,7 +514,7 @@ void test_vlsseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_tumu(
@@ -529,7 +529,7 @@ void test_vlsseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_tumu(
@@ -544,6 +544,6 @@ void test_vlsseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
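The overloaded policy intrinsics renamed above resolve entirely from operand types. As a point of reference, a minimal standalone caller is sketched below; it mirrors the signature of test_vlsseg3e16_v_u16m2_tu from this file and assumes a Clang with the vector extension enabled (e.g. -march=rv64gcv) plus the standard <riscv_vector.h> header. The helper name is illustrative only.

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// Strided three-field segment load of u16 data with the
// tail-undisturbed (_tu) policy. The overloaded name
// __riscv_vlsseg3e16_tu is selected by the vuint16m2_t operands;
// bstride is the byte distance between consecutive segments.
void deinterleave3_u16(vuint16m2_t *f0, vuint16m2_t *f1, vuint16m2_t *f2,
                       vuint16m2_t old0, vuint16m2_t old1, vuint16m2_t old2,
                       const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  __riscv_vlsseg3e16_tu(f0, f1, f2, old0, old1, old2, base, bstride, vl);
}
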
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c
index ea03ebacbcbc..ec30650f2406 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_tu(
@@ -34,7 +34,7 @@ void test_vlsseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_tu(
@@ -49,7 +49,7 @@ void test_vlsseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_tu(
@@ -64,7 +64,7 @@ void test_vlsseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_tu(
@@ -79,7 +79,7 @@ void test_vlsseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_tu(
@@ -94,7 +94,7 @@ void test_vlsseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_tu(
@@ -109,7 +109,7 @@ void test_vlsseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_tu(
@@ -124,7 +124,7 @@ void test_vlsseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_tu(
@@ -139,7 +139,7 @@ void test_vlsseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_tum(
@@ -154,7 +154,7 @@ void test_vlsseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_tum(
@@ -169,7 +169,7 @@ void test_vlsseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_tum(
@@ -184,7 +184,7 @@ void test_vlsseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_tum(
@@ -199,7 +199,7 @@ void test_vlsseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_tum(
@@ -214,7 +214,7 @@ void test_vlsseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_tum(
@@ -229,7 +229,7 @@ void test_vlsseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_tum(
@@ -244,7 +244,7 @@ void test_vlsseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_tum(
@@ -259,7 +259,7 @@ void test_vlsseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_tum(
@@ -274,7 +274,7 @@ void test_vlsseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_tumu(
@@ -289,7 +289,7 @@ void test_vlsseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_tumu(
@@ -304,7 +304,7 @@ void test_vlsseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_tumu(
@@ -319,7 +319,7 @@ void test_vlsseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_tumu(
@@ -334,7 +334,7 @@ void test_vlsseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_tumu(
@@ -349,7 +349,7 @@ void test_vlsseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_tumu(
@@ -364,7 +364,7 @@ void test_vlsseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_tumu(
@@ -379,7 +379,7 @@ void test_vlsseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_tumu(
@@ -394,7 +394,7 @@ void test_vlsseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_tumu(
@@ -409,6 +409,6 @@ void test_vlsseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
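The _tum variants above additionally take a mask, and the mask type follows the SEW/LMUL ratio of the data operands: vbool64_t pairs with f32mf2 because 32 / (1/2) = 64, just as vbool16_t pairs with f32m2. A hedged sketch of a masked caller, mirroring test_vlsseg3e32_v_f32mf2_tum with an illustrative helper name:

#include <riscv_vector.h>
#include <stddef.h>

// Masked strided segment load with the tail-undisturbed (_tum)
// policy; the maskedoff operands carry the destination values the
// policy preserves.
void load_xyz_f32_tum(vfloat32mf2_t *x, vfloat32mf2_t *y, vfloat32mf2_t *z,
                      vbool64_t mask,
                      vfloat32mf2_t mx, vfloat32mf2_t my, vfloat32mf2_t mz,
                      const float *base, ptrdiff_t bstride, size_t vl) {
  __riscv_vlsseg3e32_tum(x, y, z, mask, mx, my, mz, base, bstride, vl);
}
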
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c
index e0373dc2d400..da1ea9abe7c5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_tu(
@@ -34,7 +34,7 @@ void test_vlsseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_tu(
@@ -49,7 +49,7 @@ void test_vlsseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_tu(
@@ -64,7 +64,7 @@ void test_vlsseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_tu(
@@ -79,7 +79,7 @@ void test_vlsseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_tu(
@@ -94,7 +94,7 @@ void test_vlsseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_tum(
@@ -109,7 +109,7 @@ void test_vlsseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_tum(
@@ -124,7 +124,7 @@ void test_vlsseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_tum(
@@ -139,7 +139,7 @@ void test_vlsseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_tum(
@@ -154,7 +154,7 @@ void test_vlsseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_tum(
@@ -169,7 +169,7 @@ void test_vlsseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_tum(
@@ -184,7 +184,7 @@ void test_vlsseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_tumu(
@@ -199,7 +199,7 @@ void test_vlsseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_tumu(
@@ -214,7 +214,7 @@ void test_vlsseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_tumu(
@@ -229,7 +229,7 @@ void test_vlsseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_tumu(
@@ -244,7 +244,7 @@ void test_vlsseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_tumu(
@@ -259,7 +259,7 @@ void test_vlsseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_tumu(
@@ -274,6 +274,6 @@ void test_vlsseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
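With _tumu, both the tail elements and the inactive (mask bit 0) elements are left undisturbed, so the maskedoff operands fully determine every element the load does not write. A sketch mirroring test_vlsseg3e64_v_i64m1_tumu, helper name illustrative:

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// Tail-undisturbed, mask-undisturbed (_tumu) strided segment load:
// tail elements and elements whose mask bit is clear keep the values
// supplied through the maskedoff operands.
void load3_i64_tumu(vint64m1_t *d0, vint64m1_t *d1, vint64m1_t *d2,
                    vbool64_t mask,
                    vint64m1_t off0, vint64m1_t off1, vint64m1_t off2,
                    const int64_t *base, ptrdiff_t bstride, size_t vl) {
  __riscv_vlsseg3e64_tumu(d0, d1, d2, mask, off0, off1, off2, base, bstride, vl);
}
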
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c
index dd5790de7c43..2a0a75d8d309 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c
@@ -18,7 +18,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_tu(
@@ -33,7 +33,7 @@ void test_vlsseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_tu(
@@ -48,7 +48,7 @@ void test_vlsseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_tu(
@@ -63,7 +63,7 @@ void test_vlsseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_tu(
@@ -78,7 +78,7 @@ void test_vlsseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_tu(
@@ -93,7 +93,7 @@ void test_vlsseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_tu(
@@ -108,7 +108,7 @@ void test_vlsseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_tu(
@@ -123,7 +123,7 @@ void test_vlsseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_tu(
@@ -138,7 +138,7 @@ void test_vlsseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_tu(
@@ -153,7 +153,7 @@ void test_vlsseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_tum(
@@ -168,7 +168,7 @@ void test_vlsseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_tum(
@@ -183,7 +183,7 @@ void test_vlsseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_tum(
@@ -198,7 +198,7 @@ void test_vlsseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_tum(
@@ -213,7 +213,7 @@ void test_vlsseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_tum(
@@ -228,7 +228,7 @@ void test_vlsseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_tum(
@@ -243,7 +243,7 @@ void test_vlsseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_tum(
@@ -258,7 +258,7 @@ void test_vlsseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_tum(
@@ -273,7 +273,7 @@ void test_vlsseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_tum(
@@ -288,7 +288,7 @@ void test_vlsseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_tum(
@@ -303,7 +303,7 @@ void test_vlsseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_tumu(
@@ -318,7 +318,7 @@ void test_vlsseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_tumu(
@@ -333,7 +333,7 @@ void test_vlsseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_tumu(
@@ -348,7 +348,7 @@ void test_vlsseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_tumu(
@@ -363,7 +363,7 @@ void test_vlsseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_tumu(
@@ -378,7 +378,7 @@ void test_vlsseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_tumu(
@@ -393,7 +393,7 @@ void test_vlsseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_tumu(
@@ -408,7 +408,7 @@ void test_vlsseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_tumu(
@@ -423,7 +423,7 @@ void test_vlsseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_tumu(
@@ -438,7 +438,7 @@ void test_vlsseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_tumu(
@@ -453,6 +453,6 @@ void test_vlsseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+ return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
}
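For reference, a minimal sketch (not part of the generated tests; the wrapper and variable names are hypothetical) of calling the renamed overloaded masked form under the tail-undisturbed/mask-undisturbed (_tumu) policy, matching the vlsseg3e8 signatures exercised above:

  #include <riscv_vector.h>

  // Loads three interleaved int8 fields from a strided layout into
  // *v0..*v2. The overloaded spelling infers element type and LMUL from
  // the argument types; only the policy suffix (_tumu) is written out.
  void load3_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
                       vbool8_t mask,
                       vint8m1_t off0, vint8m1_t off1, vint8m1_t off2,
                       const int8_t *base, ptrdiff_t bstride, size_t vl) {
    __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, off0, off1, off2,
                           base, bstride, vl);
  }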
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c
index 54fbfc85deba..8d41d97e0f1c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vlsseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vlsseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vlsseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_tu(
@@ -89,7 +89,7 @@ void test_vlsseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_tu(
@@ -106,7 +106,7 @@ void test_vlsseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_tu(
@@ -123,7 +123,7 @@ void test_vlsseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_tu(
@@ -140,7 +140,7 @@ void test_vlsseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_tu(
@@ -157,7 +157,7 @@ void test_vlsseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_tu(
@@ -174,7 +174,7 @@ void test_vlsseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_tu(
@@ -191,7 +191,7 @@ void test_vlsseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_tu(
@@ -208,7 +208,7 @@ void test_vlsseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_tum(
@@ -225,7 +225,7 @@ void test_vlsseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_tum(
@@ -242,7 +242,7 @@ void test_vlsseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_tum(
@@ -259,7 +259,7 @@ void test_vlsseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_tum(
@@ -276,7 +276,7 @@ void test_vlsseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_tum(
@@ -293,7 +293,7 @@ void test_vlsseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_tum(
@@ -310,7 +310,7 @@ void test_vlsseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_tum(
@@ -327,7 +327,7 @@ void test_vlsseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_tum(
@@ -344,7 +344,7 @@ void test_vlsseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_tum(
@@ -361,7 +361,7 @@ void test_vlsseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_tum(
@@ -378,7 +378,7 @@ void test_vlsseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_tum(
@@ -395,7 +395,7 @@ void test_vlsseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_tum(
@@ -412,7 +412,7 @@ void test_vlsseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_tumu(
@@ -429,7 +429,7 @@ void test_vlsseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_tumu(
@@ -446,7 +446,7 @@ void test_vlsseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_tumu(
@@ -463,7 +463,7 @@ void test_vlsseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_tumu(
@@ -480,7 +480,7 @@ void test_vlsseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_tumu(
@@ -497,7 +497,7 @@ void test_vlsseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_tumu(
@@ -514,7 +514,7 @@ void test_vlsseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_tumu(
@@ -531,7 +531,7 @@ void test_vlsseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_tumu(
@@ -548,7 +548,7 @@ void test_vlsseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_tumu(
@@ -565,7 +565,7 @@ void test_vlsseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_tumu(
@@ -582,7 +582,7 @@ void test_vlsseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_tumu(
@@ -599,7 +599,7 @@ void test_vlsseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_tumu(
@@ -616,6 +616,6 @@ void test_vlsseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c
index d1e2c1017c88..73eaf770483f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_tu(
@@ -38,7 +38,7 @@ void test_vlsseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_tu(
@@ -55,7 +55,7 @@ void test_vlsseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_tu(
@@ -72,7 +72,7 @@ void test_vlsseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_tu(
@@ -89,7 +89,7 @@ void test_vlsseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_tu(
@@ -106,7 +106,7 @@ void test_vlsseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_tu(
@@ -123,7 +123,7 @@ void test_vlsseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_tu(
@@ -140,7 +140,7 @@ void test_vlsseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_tu(
@@ -157,7 +157,7 @@ void test_vlsseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_tum(
@@ -174,7 +174,7 @@ void test_vlsseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_tum(
@@ -191,7 +191,7 @@ void test_vlsseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_tum(
@@ -208,7 +208,7 @@ void test_vlsseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_tum(
@@ -225,7 +225,7 @@ void test_vlsseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_tum(
@@ -242,7 +242,7 @@ void test_vlsseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_tum(
@@ -259,7 +259,7 @@ void test_vlsseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_tum(
@@ -276,7 +276,7 @@ void test_vlsseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_tum(
@@ -293,7 +293,7 @@ void test_vlsseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_tum(
@@ -310,7 +310,7 @@ void test_vlsseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_tumu(
@@ -327,7 +327,7 @@ void test_vlsseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_tumu(
@@ -344,7 +344,7 @@ void test_vlsseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_tumu(
@@ -361,7 +361,7 @@ void test_vlsseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_tumu(
@@ -378,7 +378,7 @@ void test_vlsseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_tumu(
@@ -395,7 +395,7 @@ void test_vlsseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_tumu(
@@ -412,7 +412,7 @@ void test_vlsseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_tumu(
@@ -429,7 +429,7 @@ void test_vlsseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_tumu(
@@ -446,7 +446,7 @@ void test_vlsseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_tumu(
@@ -463,6 +463,6 @@ void test_vlsseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c
index 6085396e2fe8..5630274ddde4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_tu(
@@ -38,7 +38,7 @@ void test_vlsseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_tu(
@@ -55,7 +55,7 @@ void test_vlsseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_tu(
@@ -72,7 +72,7 @@ void test_vlsseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_tu(
@@ -89,7 +89,7 @@ void test_vlsseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_tu(
@@ -106,7 +106,7 @@ void test_vlsseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_tum(
@@ -123,7 +123,7 @@ void test_vlsseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_tum(
@@ -140,7 +140,7 @@ void test_vlsseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_tum(
@@ -157,7 +157,7 @@ void test_vlsseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_tum(
@@ -174,7 +174,7 @@ void test_vlsseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_tum(
@@ -191,7 +191,7 @@ void test_vlsseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_tum(
@@ -208,7 +208,7 @@ void test_vlsseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_tumu(
@@ -225,7 +225,7 @@ void test_vlsseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_tumu(
@@ -242,7 +242,7 @@ void test_vlsseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_tumu(
@@ -259,7 +259,7 @@ void test_vlsseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_tumu(
@@ -276,7 +276,7 @@ void test_vlsseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_tumu(
@@ -293,7 +293,7 @@ void test_vlsseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_tumu(
@@ -310,6 +310,6 @@ void test_vlsseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
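Similarly, a hedged sketch of the unmasked tail-undisturbed (_tu) overload for the four-field 64-bit loads updated above; all names other than the intrinsic itself are illustrative:

  #include <riscv_vector.h>

  // Unmasked _tu form: the maskedoff operands (off0..off3 here) provide
  // the destination values that stay undisturbed past vl. The overload
  // resolves on the argument types; a type-explicit spelling that names
  // them directly (e.g. __riscv_vlsseg4e64_v_f64m1_tu) is assumed to be
  // the non-overloaded counterpart.
  void load4_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1,
                      vfloat64m1_t *v2, vfloat64m1_t *v3,
                      vfloat64m1_t off0, vfloat64m1_t off1,
                      vfloat64m1_t off2, vfloat64m1_t off3,
                      const double *base, ptrdiff_t bstride, size_t vl) {
    __riscv_vlsseg4e64_tu(v0, v1, v2, v3, off0, off1, off2, off3,
                          base, bstride, vl);
  }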
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c
index d40de63237c1..146ed5d6e5c1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c
@@ -20,7 +20,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_tu(
@@ -37,7 +37,7 @@ void test_vlsseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_tu(
@@ -54,7 +54,7 @@ void test_vlsseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_tu(
@@ -71,7 +71,7 @@ void test_vlsseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_tu(
@@ -88,7 +88,7 @@ void test_vlsseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_tu(
@@ -105,7 +105,7 @@ void test_vlsseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_tu(
@@ -122,7 +122,7 @@ void test_vlsseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_tu(
@@ -139,7 +139,7 @@ void test_vlsseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_tu(
@@ -156,7 +156,7 @@ void test_vlsseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_tu(
@@ -173,7 +173,7 @@ void test_vlsseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_tum(
@@ -190,7 +190,7 @@ void test_vlsseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_tum(
@@ -207,7 +207,7 @@ void test_vlsseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_tum(
@@ -224,7 +224,7 @@ void test_vlsseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_tum(
@@ -241,7 +241,7 @@ void test_vlsseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_tum(
@@ -258,7 +258,7 @@ void test_vlsseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_tum(
@@ -275,7 +275,7 @@ void test_vlsseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_tum(
@@ -292,7 +292,7 @@ void test_vlsseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_tum(
@@ -309,7 +309,7 @@ void test_vlsseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_tum(
@@ -326,7 +326,7 @@ void test_vlsseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_tum(
@@ -343,7 +343,7 @@ void test_vlsseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_tumu(
@@ -360,7 +360,7 @@ void test_vlsseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_tumu(
@@ -377,7 +377,7 @@ void test_vlsseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_tumu(
@@ -394,7 +394,7 @@ void test_vlsseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_tumu(
@@ -411,7 +411,7 @@ void test_vlsseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_tumu(
@@ -428,7 +428,7 @@ void test_vlsseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_tumu(
@@ -445,7 +445,7 @@ void test_vlsseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_tumu(
@@ -462,7 +462,7 @@ void test_vlsseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_tumu(
@@ -479,7 +479,7 @@ void test_vlsseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_tumu(
@@ -496,7 +496,7 @@ void test_vlsseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_tumu(
@@ -513,6 +513,6 @@ void test_vlsseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+ return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
}
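The hunks above change only the intrinsic's name; the overloaded argument list of the tail-undisturbed (_tu) policy variant is untouched. A minimal migration sketch under that assumption — load_segments is a hypothetical caller, and the parameter list is copied from the test above:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical caller: only the rename from vlsseg4e8_tu to
 * __riscv_vlsseg4e8_tu is required; arguments pass through unchanged. */
void load_segments(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3,
                   vint8m1_t off0, vint8m1_t off1, vint8m1_t off2, vint8m1_t off3,
                   const int8_t *base, ptrdiff_t bstride, size_t vl) {
  /* Before this patch:
   *   vlsseg4e8_tu(v0, v1, v2, v3, off0, off1, off2, off3, base, bstride, vl); */
  __riscv_vlsseg4e8_tu(v0, v1, v2, v3, off0, off1, off2, off3, base, bstride, vl);
}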
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c
index d5bd2080476d..09d6cb8262af 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vlsseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vlsseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_tu(
@@ -80,7 +80,7 @@ void test_vlsseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_tu(
@@ -99,7 +99,7 @@ void test_vlsseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_tu(
@@ -118,7 +118,7 @@ void test_vlsseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_tu(
@@ -137,7 +137,7 @@ void test_vlsseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_tu(
@@ -156,7 +156,7 @@ void test_vlsseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_tu(
@@ -175,7 +175,7 @@ void test_vlsseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_tum(
@@ -194,7 +194,7 @@ void test_vlsseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_tum(
@@ -213,7 +213,7 @@ void test_vlsseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_tum(
@@ -232,7 +232,7 @@ void test_vlsseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_tum(
@@ -251,7 +251,7 @@ void test_vlsseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_tum(
@@ -270,7 +270,7 @@ void test_vlsseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_tum(
@@ -289,7 +289,7 @@ void test_vlsseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_tum(
@@ -308,7 +308,7 @@ void test_vlsseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_tum(
@@ -327,7 +327,7 @@ void test_vlsseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_tum(
@@ -346,7 +346,7 @@ void test_vlsseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_tumu(
@@ -365,7 +365,7 @@ void test_vlsseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_tumu(
@@ -384,7 +384,7 @@ void test_vlsseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_tumu(
@@ -403,7 +403,7 @@ void test_vlsseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_tumu(
@@ -422,7 +422,7 @@ void test_vlsseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_tumu(
@@ -441,7 +441,7 @@ void test_vlsseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_tumu(
@@ -460,7 +460,7 @@ void test_vlsseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_tumu(
@@ -479,7 +479,7 @@ void test_vlsseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_tumu(
@@ -498,7 +498,7 @@ void test_vlsseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_tumu(
@@ -517,6 +517,6 @@ void test_vlsseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
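Note how the mask operand's type tracks SEW and LMUL in the masked hunks above: vbool64_t pairs with mf4 at e16, vbool32_t with mf2, and vbool16_t with m1, i.e. vboolN_t with N = SEW/LMUL. A minimal sketch of a renamed tail-undisturbed, mask-undisturbed (_tumu) call — process_segments is a hypothetical caller, _Float16 support (e.g. Zvfh) is assumed, and the signature is copied from the test above:

#include <riscv_vector.h>
#include <stddef.h>

/* Hypothetical caller: the _tumu variant keeps inactive elements from the
 * maskedoff operands; only the __riscv_ rename is new in this patch. */
void process_segments(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2,
                      vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask,
                      vfloat16mf4_t off0, vfloat16mf4_t off1, vfloat16mf4_t off2,
                      vfloat16mf4_t off3, vfloat16mf4_t off4,
                      const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  /* vbool64_t because SEW/LMUL = 16/(1/4) = 64 for f16mf4. */
  __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, off0, off1, off2, off3, off4,
                          base, bstride, vl);
}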
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c
index 9576c63131cc..fa5836c81627 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_tu(
@@ -42,7 +42,7 @@ void test_vlsseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_tu(
@@ -61,7 +61,7 @@ void test_vlsseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_tu(
@@ -80,7 +80,7 @@ void test_vlsseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_tu(
@@ -99,7 +99,7 @@ void test_vlsseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_tu(
@@ -118,7 +118,7 @@ void test_vlsseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_tum(
@@ -137,7 +137,7 @@ void test_vlsseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_tum(
@@ -156,7 +156,7 @@ void test_vlsseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_tum(
@@ -175,7 +175,7 @@ void test_vlsseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_tum(
@@ -194,7 +194,7 @@ void test_vlsseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_tum(
@@ -213,7 +213,7 @@ void test_vlsseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_tum(
@@ -232,7 +232,7 @@ void test_vlsseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_tumu(
@@ -251,7 +251,7 @@ void test_vlsseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_tumu(
@@ -270,7 +270,7 @@ void test_vlsseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_tumu(
@@ -289,7 +289,7 @@ void test_vlsseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_tumu(
@@ -308,7 +308,7 @@ void test_vlsseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_tumu(
@@ -327,7 +327,7 @@ void test_vlsseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_tumu(
@@ -346,6 +346,6 @@ void test_vlsseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c
index 351ee79eca4c..31a8865430f3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_tu(
@@ -42,7 +42,7 @@ void test_vlsseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_tu(
@@ -61,7 +61,7 @@ void test_vlsseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_tum(
@@ -80,7 +80,7 @@ void test_vlsseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_tum(
@@ -99,7 +99,7 @@ void test_vlsseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_tum(
@@ -118,7 +118,7 @@ void test_vlsseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_tumu(
@@ -137,7 +137,7 @@ void test_vlsseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_tumu(
@@ -156,7 +156,7 @@ void test_vlsseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_tumu(
@@ -175,6 +175,6 @@ void test_vlsseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c
index a25293be4b28..b80abdcf348b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c
@@ -22,7 +22,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_tu(
@@ -41,7 +41,7 @@ void test_vlsseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_tu(
@@ -60,7 +60,7 @@ void test_vlsseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_tu(
@@ -79,7 +79,7 @@ void test_vlsseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_tu(
@@ -98,7 +98,7 @@ void test_vlsseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_tu(
@@ -117,7 +117,7 @@ void test_vlsseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_tu(
@@ -136,7 +136,7 @@ void test_vlsseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_tu(
@@ -155,7 +155,7 @@ void test_vlsseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_tum(
@@ -174,7 +174,7 @@ void test_vlsseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_tum(
@@ -193,7 +193,7 @@ void test_vlsseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_tum(
@@ -212,7 +212,7 @@ void test_vlsseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_tum(
@@ -231,7 +231,7 @@ void test_vlsseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_tum(
@@ -250,7 +250,7 @@ void test_vlsseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_tum(
@@ -269,7 +269,7 @@ void test_vlsseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_tum(
@@ -288,7 +288,7 @@ void test_vlsseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_tum(
@@ -307,7 +307,7 @@ void test_vlsseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_tumu(
@@ -326,7 +326,7 @@ void test_vlsseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_tumu(
@@ -345,7 +345,7 @@ void test_vlsseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_tumu(
@@ -364,7 +364,7 @@ void test_vlsseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_tumu(
@@ -383,7 +383,7 @@ void test_vlsseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_tumu(
@@ -402,7 +402,7 @@ void test_vlsseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_tumu(
@@ -421,7 +421,7 @@ void test_vlsseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_tumu(
@@ -440,7 +440,7 @@ void test_vlsseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_tumu(
@@ -459,6 +459,6 @@ void test_vlsseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+ return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c
index dbc5e3317966..07ca68eec3ef 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vlsseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vlsseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_tu(
@@ -88,7 +88,7 @@ void test_vlsseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_tu(
@@ -109,7 +109,7 @@ void test_vlsseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_tu(
@@ -130,7 +130,7 @@ void test_vlsseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_tu(
@@ -151,7 +151,7 @@ void test_vlsseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_tu(
@@ -172,7 +172,7 @@ void test_vlsseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_tu(
@@ -193,7 +193,7 @@ void test_vlsseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_tum(
@@ -214,7 +214,7 @@ void test_vlsseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_tum(
@@ -235,7 +235,7 @@ void test_vlsseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_tum(
@@ -256,7 +256,7 @@ void test_vlsseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_tum(
@@ -277,7 +277,7 @@ void test_vlsseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_tum(
@@ -298,7 +298,7 @@ void test_vlsseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_tum(
@@ -319,7 +319,7 @@ void test_vlsseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_tum(
@@ -340,7 +340,7 @@ void test_vlsseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_tum(
@@ -361,7 +361,7 @@ void test_vlsseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_tum(
@@ -382,7 +382,7 @@ void test_vlsseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_tumu(
@@ -403,7 +403,7 @@ void test_vlsseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_tumu(
@@ -424,7 +424,7 @@ void test_vlsseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_tumu(
@@ -445,7 +445,7 @@ void test_vlsseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_tumu(
@@ -466,7 +466,7 @@ void test_vlsseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_tumu(
@@ -487,7 +487,7 @@ void test_vlsseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_tumu(
@@ -508,7 +508,7 @@ void test_vlsseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_tumu(
@@ -529,7 +529,7 @@ void test_vlsseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_tumu(
@@ -550,7 +550,7 @@ void test_vlsseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_tumu(
@@ -571,6 +571,6 @@ void test_vlsseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c
index 5ece9abb329b..861e65472646 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_tu(
@@ -46,7 +46,7 @@ void test_vlsseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_tu(
@@ -67,7 +67,7 @@ void test_vlsseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_tu(
@@ -88,7 +88,7 @@ void test_vlsseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_tu(
@@ -109,7 +109,7 @@ void test_vlsseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_tu(
@@ -130,7 +130,7 @@ void test_vlsseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_tum(
@@ -151,7 +151,7 @@ void test_vlsseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_tum(
@@ -172,7 +172,7 @@ void test_vlsseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_tum(
@@ -193,7 +193,7 @@ void test_vlsseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_tum(
@@ -214,7 +214,7 @@ void test_vlsseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_tum(
@@ -235,7 +235,7 @@ void test_vlsseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_tum(
@@ -256,7 +256,7 @@ void test_vlsseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_tumu(
@@ -277,7 +277,7 @@ void test_vlsseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_tumu(
@@ -298,7 +298,7 @@ void test_vlsseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_tumu(
@@ -319,7 +319,7 @@ void test_vlsseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_tumu(
@@ -340,7 +340,7 @@ void test_vlsseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_tumu(
@@ -361,7 +361,7 @@ void test_vlsseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_tumu(
@@ -382,6 +382,6 @@ void test_vlsseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c
index 5773d8c5cd0c..2ac869e1cb75 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_tu(
@@ -46,7 +46,7 @@ void test_vlsseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_tu(
@@ -67,7 +67,7 @@ void test_vlsseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_tum(
@@ -88,7 +88,7 @@ void test_vlsseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_tum(
@@ -109,7 +109,7 @@ void test_vlsseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_tum(
@@ -130,7 +130,7 @@ void test_vlsseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_tumu(
@@ -151,7 +151,7 @@ void test_vlsseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_tumu(
@@ -172,7 +172,7 @@ void test_vlsseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_tumu(
@@ -193,6 +193,6 @@ void test_vlsseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c
index fb8ac716be06..415edd4a2bff 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c
@@ -24,7 +24,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_tu(
@@ -45,7 +45,7 @@ void test_vlsseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_tu(
@@ -66,7 +66,7 @@ void test_vlsseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_tu(
@@ -87,7 +87,7 @@ void test_vlsseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_tu(
@@ -108,7 +108,7 @@ void test_vlsseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_tu(
@@ -129,7 +129,7 @@ void test_vlsseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_tu(
@@ -150,7 +150,7 @@ void test_vlsseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_tu(
@@ -171,7 +171,7 @@ void test_vlsseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_tum(
@@ -192,7 +192,7 @@ void test_vlsseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_tum(
@@ -213,7 +213,7 @@ void test_vlsseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_tum(
@@ -234,7 +234,7 @@ void test_vlsseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_tum(
@@ -255,7 +255,7 @@ void test_vlsseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_tum(
@@ -276,7 +276,7 @@ void test_vlsseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_tum(
@@ -297,7 +297,7 @@ void test_vlsseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_tum(
@@ -318,7 +318,7 @@ void test_vlsseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_tum(
@@ -339,7 +339,7 @@ void test_vlsseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_tumu(
@@ -360,7 +360,7 @@ void test_vlsseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_tumu(
@@ -381,7 +381,7 @@ void test_vlsseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_tumu(
@@ -402,7 +402,7 @@ void test_vlsseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_tumu(
@@ -423,7 +423,7 @@ void test_vlsseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_tumu(
@@ -444,7 +444,7 @@ void test_vlsseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_tumu(
@@ -465,7 +465,7 @@ void test_vlsseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_tumu(
@@ -486,7 +486,7 @@ void test_vlsseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_tumu(
@@ -507,6 +507,6 @@ void test_vlsseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+ return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c
index f947ec0d2a25..eaffe93e0906 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vlsseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vlsseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_tu(
@@ -96,7 +96,7 @@ void test_vlsseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_tu(
@@ -119,7 +119,7 @@ void test_vlsseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_tu(
@@ -142,7 +142,7 @@ void test_vlsseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_tu(
@@ -165,7 +165,7 @@ void test_vlsseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_tu(
@@ -188,7 +188,7 @@ void test_vlsseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_tu(
@@ -211,7 +211,7 @@ void test_vlsseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_tum(
@@ -234,7 +234,7 @@ void test_vlsseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_tum(
@@ -257,7 +257,7 @@ void test_vlsseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_tum(
@@ -280,7 +280,7 @@ void test_vlsseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_tum(
@@ -303,7 +303,7 @@ void test_vlsseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_tum(
@@ -326,7 +326,7 @@ void test_vlsseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_tum(
@@ -349,7 +349,7 @@ void test_vlsseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_tum(
@@ -372,7 +372,7 @@ void test_vlsseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_tum(
@@ -395,7 +395,7 @@ void test_vlsseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_tum(
@@ -418,7 +418,7 @@ void test_vlsseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_tumu(
@@ -441,7 +441,7 @@ void test_vlsseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_tumu(
@@ -464,7 +464,7 @@ void test_vlsseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_tumu(
@@ -487,7 +487,7 @@ void test_vlsseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_tumu(
@@ -510,7 +510,7 @@ void test_vlsseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_tumu(
@@ -533,7 +533,7 @@ void test_vlsseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_tumu(
@@ -556,7 +556,7 @@ void test_vlsseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_tumu(
@@ -579,7 +579,7 @@ void test_vlsseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_tumu(
@@ -602,7 +602,7 @@ void test_vlsseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_tumu(
@@ -625,6 +625,6 @@ void test_vlsseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c
index c0885b7ddf2a..a40164a76537 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_tu(
@@ -50,7 +50,7 @@ void test_vlsseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_tu(
@@ -73,7 +73,7 @@ void test_vlsseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_tu(
@@ -96,7 +96,7 @@ void test_vlsseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_tu(
@@ -119,7 +119,7 @@ void test_vlsseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_tu(
@@ -142,7 +142,7 @@ void test_vlsseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_tum(
@@ -165,7 +165,7 @@ void test_vlsseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_tum(
@@ -188,7 +188,7 @@ void test_vlsseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_tum(
@@ -211,7 +211,7 @@ void test_vlsseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_tum(
@@ -234,7 +234,7 @@ void test_vlsseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_tum(
@@ -257,7 +257,7 @@ void test_vlsseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_tum(
@@ -280,7 +280,7 @@ void test_vlsseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_tumu(
@@ -303,7 +303,7 @@ void test_vlsseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_tumu(
@@ -326,7 +326,7 @@ void test_vlsseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_tumu(
@@ -349,7 +349,7 @@ void test_vlsseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_tumu(
@@ -372,7 +372,7 @@ void test_vlsseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_tumu(
@@ -395,7 +395,7 @@ void test_vlsseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_tumu(
@@ -418,6 +418,6 @@ void test_vlsseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c
index 6edfffec3109..968b46e261c1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_tu(
@@ -50,7 +50,7 @@ void test_vlsseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_tu(
@@ -73,7 +73,7 @@ void test_vlsseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_tum(
@@ -96,7 +96,7 @@ void test_vlsseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_tum(
@@ -119,7 +119,7 @@ void test_vlsseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_tum(
@@ -142,7 +142,7 @@ void test_vlsseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_tumu(
@@ -165,7 +165,7 @@ void test_vlsseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_tumu(
@@ -188,7 +188,7 @@ void test_vlsseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_tumu(
@@ -211,6 +211,6 @@ void test_vlsseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c
index 7bcb90122a96..6d89af4d66f4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c
@@ -26,7 +26,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_tu(
@@ -49,7 +49,7 @@ void test_vlsseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_tu(
@@ -72,7 +72,7 @@ void test_vlsseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_tu(
@@ -95,7 +95,7 @@ void test_vlsseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_tu(
@@ -118,7 +118,7 @@ void test_vlsseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_tu(
@@ -141,7 +141,7 @@ void test_vlsseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_tu(
@@ -164,7 +164,7 @@ void test_vlsseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_tu(
@@ -187,7 +187,7 @@ void test_vlsseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_tum(
@@ -210,7 +210,7 @@ void test_vlsseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_tum(
@@ -233,7 +233,7 @@ void test_vlsseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_tum(
@@ -256,7 +256,7 @@ void test_vlsseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_tum(
@@ -279,7 +279,7 @@ void test_vlsseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_tum(
@@ -302,7 +302,7 @@ void test_vlsseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_tum(
@@ -325,7 +325,7 @@ void test_vlsseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_tum(
@@ -348,7 +348,7 @@ void test_vlsseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_tum(
@@ -371,7 +371,7 @@ void test_vlsseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_tumu(
@@ -394,7 +394,7 @@ void test_vlsseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_tumu(
@@ -417,7 +417,7 @@ void test_vlsseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_tumu(
@@ -440,7 +440,7 @@ void test_vlsseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_tumu(
@@ -463,7 +463,7 @@ void test_vlsseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_tumu(
@@ -486,7 +486,7 @@ void test_vlsseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_tumu(
@@ -509,7 +509,7 @@ void test_vlsseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_tumu(
@@ -532,7 +532,7 @@ void test_vlsseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_tumu(
@@ -555,6 +555,6 @@ void test_vlsseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+ return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
}
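The hunks above are mechanical: every overloaded RVV intrinsic call gains the `__riscv_` prefix while its argument list stays unchanged. For readers not steeped in these autogenerated tests, below is a minimal sketch of what a caller looks like after this patch. It is a hypothetical helper, not a file in this change; it mirrors the `test_vlsseg7e8_v_i8m1_tu` case above and assumes a Clang new enough to carry this patch, built for a target with the vector extension (e.g. `-march=rv64gcv`).

#include <stddef.h>        // ptrdiff_t, size_t
#include <stdint.h>        // int8_t
#include <riscv_vector.h>  // RVV types and intrinsic declarations

// Hypothetical wrapper (not part of this patch): a tail-undisturbed (_tu)
// strided load of a 7-field segment of int8 elements at LMUL=1, written
// against the new __riscv_-prefixed overloaded spelling.
void load7_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
                   vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5,
                   vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1,
                   vint8m1_t maskedoff2, vint8m1_t maskedoff3,
                   vint8m1_t maskedoff4, vint8m1_t maskedoff5,
                   vint8m1_t maskedoff6, const int8_t *base,
                   ptrdiff_t bstride, size_t vl) {
  // Pre-patch overloaded spelling:  vlsseg7e8_tu(...)
  // Post-patch spelling adds the __riscv_ prefix; arguments are unchanged:
  __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1,
                       maskedoff2, maskedoff3, maskedoff4, maskedoff5,
                       maskedoff6, base, bstride, vl);
}

The same rename applies uniformly to the `_tum` and `_tumu` policy variants in these files, with the mask argument inserted ahead of the maskedoff operands exactly as in the tests above.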
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c
index c7073bcf53d2..e889e0e8c64f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vlsseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vlsseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_tu(
@@ -104,7 +104,7 @@ void test_vlsseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_tu(
@@ -129,7 +129,7 @@ void test_vlsseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_tu(
@@ -154,7 +154,7 @@ void test_vlsseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_tu(
@@ -179,7 +179,7 @@ void test_vlsseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_tu(
@@ -204,7 +204,7 @@ void test_vlsseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_tu(
@@ -229,7 +229,7 @@ void test_vlsseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_tum(
@@ -254,7 +254,7 @@ void test_vlsseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_tum(
@@ -279,7 +279,7 @@ void test_vlsseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_tum(
@@ -304,7 +304,7 @@ void test_vlsseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_tum(
@@ -329,7 +329,7 @@ void test_vlsseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_tum(
@@ -354,7 +354,7 @@ void test_vlsseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_tum(
@@ -379,7 +379,7 @@ void test_vlsseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_tum(
@@ -404,7 +404,7 @@ void test_vlsseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_tum(
@@ -429,7 +429,7 @@ void test_vlsseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_tum(
@@ -454,7 +454,7 @@ void test_vlsseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_tumu(
@@ -479,7 +479,7 @@ void test_vlsseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_tumu(
@@ -504,7 +504,7 @@ void test_vlsseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_tumu(
@@ -529,7 +529,7 @@ void test_vlsseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_tumu(
@@ -554,7 +554,7 @@ void test_vlsseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_tumu(
@@ -579,7 +579,7 @@ void test_vlsseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_tumu(
@@ -604,7 +604,7 @@ void test_vlsseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_tumu(
@@ -629,7 +629,7 @@ void test_vlsseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_tumu(
@@ -654,7 +654,7 @@ void test_vlsseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_tumu(
@@ -679,6 +679,6 @@ void test_vlsseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
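// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): calling the renamed overloaded
// intrinsic from user code, mirroring the autogenerated tests above. Assumes
// a toolchain whose <riscv_vector.h> exposes the __riscv_-prefixed overloads,
// with vector support enabled (e.g. -march=rv64gcv_zvfh for _Float16).
#include <riscv_vector.h>
#include <stddef.h>

void demo_vlsseg8e16_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2,
                        vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5,
                        vfloat16m1_t *v6, vfloat16m1_t *v7,
                        vfloat16m1_t off0, vfloat16m1_t off1, vfloat16m1_t off2,
                        vfloat16m1_t off3, vfloat16m1_t off4, vfloat16m1_t off5,
                        vfloat16m1_t off6, vfloat16m1_t off7,
                        const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  // _tu (tail undisturbed): destination elements at indices >= vl keep the
  // values of the corresponding offN (maskedoff) operands.
  __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7,
                        off0, off1, off2, off3, off4, off5, off6, off7,
                        base, bstride, vl);
}
// ---------------------------------------------------------------------------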
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c
index 2790a307e8a2..375c9e7271b5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_tu(
@@ -54,7 +54,7 @@ void test_vlsseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_tu(
@@ -79,7 +79,7 @@ void test_vlsseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_tu(
@@ -104,7 +104,7 @@ void test_vlsseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_tu(
@@ -129,7 +129,7 @@ void test_vlsseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_tu(
@@ -154,7 +154,7 @@ void test_vlsseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_tum(
@@ -179,7 +179,7 @@ void test_vlsseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_tum(
@@ -204,7 +204,7 @@ void test_vlsseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_tum(
@@ -229,7 +229,7 @@ void test_vlsseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_tum(
@@ -254,7 +254,7 @@ void test_vlsseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_tum(
@@ -279,7 +279,7 @@ void test_vlsseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_tum(
@@ -304,7 +304,7 @@ void test_vlsseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_tumu(
@@ -329,7 +329,7 @@ void test_vlsseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_tumu(
@@ -354,7 +354,7 @@ void test_vlsseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_tumu(
@@ -379,7 +379,7 @@ void test_vlsseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_tumu(
@@ -404,7 +404,7 @@ void test_vlsseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_tumu(
@@ -429,7 +429,7 @@ void test_vlsseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_tumu(
@@ -454,6 +454,6 @@ void test_vlsseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
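// ---------------------------------------------------------------------------
// Companion sketch for the masked _tumu overload exercised above (same
// illustrative status and toolchain assumptions as the e16 sketch earlier).
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

void demo_vlsseg8e32_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
                          vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5,
                          vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask,
                          vuint32m1_t off0, vuint32m1_t off1, vuint32m1_t off2,
                          vuint32m1_t off3, vuint32m1_t off4, vuint32m1_t off5,
                          vuint32m1_t off6, vuint32m1_t off7,
                          const uint32_t *base, ptrdiff_t bstride, size_t vl) {
  // _tumu (tail undisturbed, mask undisturbed): tail elements and elements
  // whose mask bit is clear both keep the offN values; only active elements
  // are loaded from memory.
  __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask,
                          off0, off1, off2, off3, off4, off5, off6, off7,
                          base, bstride, vl);
}
// ---------------------------------------------------------------------------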
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c
index 7d7319638540..cca0f1573e9e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_tu(
@@ -54,7 +54,7 @@ void test_vlsseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_tu(
@@ -79,7 +79,7 @@ void test_vlsseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_tum(
@@ -104,7 +104,7 @@ void test_vlsseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_tum(
@@ -129,7 +129,7 @@ void test_vlsseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_tum(
@@ -154,7 +154,7 @@ void test_vlsseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_tumu(
@@ -179,7 +179,7 @@ void test_vlsseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_tumu(
@@ -204,7 +204,7 @@ void test_vlsseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_tumu(
@@ -229,6 +229,6 @@ void test_vlsseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
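// ---------------------------------------------------------------------------
// Final sketch: the _tum overload, completing the three policy variants
// (_tu, _tum, _tumu) these autogenerated tests exercise per element width.
// Same illustrative status and toolchain assumptions as the sketches above.
#include <riscv_vector.h>
#include <stddef.h>

void demo_vlsseg8e64_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2,
                         vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5,
                         vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask,
                         vfloat64m1_t off0, vfloat64m1_t off1, vfloat64m1_t off2,
                         vfloat64m1_t off3, vfloat64m1_t off4, vfloat64m1_t off5,
                         vfloat64m1_t off6, vfloat64m1_t off7,
                         const double *base, ptrdiff_t bstride, size_t vl) {
  // _tum (tail undisturbed, masked): tail elements keep the offN values,
  // while masked-off elements follow the mask-agnostic policy.
  __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask,
                         off0, off1, off2, off3, off4, off5, off6, off7,
                         base, bstride, vl);
}
// ---------------------------------------------------------------------------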
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c
index ea9e28d3a8fb..be7bdd53520e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c
@@ -28,7 +28,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_tu(
@@ -53,7 +53,7 @@ void test_vlsseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_tu(
@@ -78,7 +78,7 @@ void test_vlsseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_tu(
@@ -103,7 +103,7 @@ void test_vlsseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_tu(
@@ -128,7 +128,7 @@ void test_vlsseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_tu(
@@ -153,7 +153,7 @@ void test_vlsseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_tu(
@@ -178,7 +178,7 @@ void test_vlsseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_tu(
@@ -203,7 +203,7 @@ void test_vlsseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_tum(
@@ -228,7 +228,7 @@ void test_vlsseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_tum(
@@ -253,7 +253,7 @@ void test_vlsseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_tum(
@@ -278,7 +278,7 @@ void test_vlsseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_tum(
@@ -303,7 +303,7 @@ void test_vlsseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_tum(
@@ -328,7 +328,7 @@ void test_vlsseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_tum(
@@ -353,7 +353,7 @@ void test_vlsseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_tum(
@@ -378,7 +378,7 @@ void test_vlsseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_tum(
@@ -403,7 +403,7 @@ void test_vlsseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_tumu(
@@ -428,7 +428,7 @@ void test_vlsseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_tumu(
@@ -453,7 +453,7 @@ void test_vlsseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_tumu(
@@ -478,7 +478,7 @@ void test_vlsseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_tumu(
@@ -503,7 +503,7 @@ void test_vlsseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_tumu(
@@ -528,7 +528,7 @@ void test_vlsseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_tumu(
@@ -553,7 +553,7 @@ void test_vlsseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_tumu(
@@ -578,7 +578,7 @@ void test_vlsseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_tumu(
@@ -603,6 +603,6 @@ void test_vlsseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vlsseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+ return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
}
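
For reference, a minimal sketch of what a user-side call looks like after this rename, using the same overloaded tail-undisturbed indexed-load signature exercised by `test_vluxei16_v_i32m1_tu` below. The helper name `gather_i32` and the build assumption (clang with `-march=rv64gcv` and RVV intrinsic support) are illustrative, not part of this patch.

```c
// Sketch only: user call site after the __riscv_ prefix rename.
// Assumes: clang -march=rv64gcv with RVV intrinsics; helper name is ours.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Tail-undisturbed (_tu) indexed gather: lanes at index >= vl keep the
// values from maskedoff instead of being overwritten.
vint32m1_t gather_i32(vint32m1_t maskedoff, const int32_t *base,
                      vuint16mf2_t bindex, size_t vl) {
  // Previously spelled vluxei16_tu(...); the overloaded form now
  // carries the __riscv_ prefix.
  return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
```
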
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei16.c
index 6f18b43d39dc..f1b7b8f300d6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_tu(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_tu(
@@ -157,7 +157,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_tu(
@@ -166,7 +166,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_tu(
@@ -175,7 +175,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_tu(
@@ -184,7 +184,7 @@ vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_tu(
@@ -193,7 +193,7 @@ vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_tu(
@@ -202,7 +202,7 @@ vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_tu(
@@ -211,7 +211,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_tu(
@@ -220,7 +220,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_tu(
@@ -229,7 +229,7 @@ vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_tu(
@@ -238,7 +238,7 @@ vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_tu(
@@ -247,7 +247,7 @@ vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_tu(
@@ -256,7 +256,7 @@ vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_tu(
@@ -265,7 +265,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_tu(
@@ -274,7 +274,7 @@ vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_tu(
@@ -283,7 +283,7 @@ vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_tu(
@@ -292,7 +292,7 @@ vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_tu(
@@ -301,7 +301,7 @@ vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_tu(
@@ -310,7 +310,7 @@ vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_tu(
@@ -319,7 +319,7 @@ vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_tu(
@@ -328,7 +328,7 @@ vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_tu(
@@ -337,7 +337,7 @@ vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_tu(
@@ -346,7 +346,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_tu(
@@ -355,7 +355,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_tu(
@@ -364,7 +364,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_tu(
@@ -373,7 +373,7 @@ vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_tu(
@@ -382,7 +382,7 @@ vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_tu(
@@ -391,7 +391,7 @@ vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_tu(
@@ -400,7 +400,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_tu(
@@ -409,7 +409,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_tu(
@@ -418,7 +418,7 @@ vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_tu(
@@ -427,7 +427,7 @@ vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_tu(
@@ -436,7 +436,7 @@ vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_tu(
@@ -445,7 +445,7 @@ vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_tu(
@@ -454,7 +454,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_tu(
@@ -463,7 +463,7 @@ vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_tu(
@@ -472,7 +472,7 @@ vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_tu(
@@ -481,7 +481,7 @@ vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_tu(
@@ -490,7 +490,7 @@ vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_tu(
@@ -499,7 +499,7 @@ vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_tu(
@@ -508,7 +508,7 @@ vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_tu(
@@ -517,7 +517,7 @@ vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_tum(
@@ -526,7 +526,7 @@ vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_tum(
@@ -535,7 +535,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_tum(
@@ -544,7 +544,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_tum(
@@ -553,7 +553,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_tum(
@@ -562,7 +562,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_tum(
@@ -571,7 +571,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_tum(
@@ -580,7 +580,7 @@ vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_tum(
@@ -589,7 +589,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_tum(
@@ -598,7 +598,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_tum(
@@ -607,7 +607,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_tum(
@@ -616,7 +616,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_tum(
@@ -625,7 +625,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_tum(
@@ -634,7 +634,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_tum(
@@ -643,7 +643,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_tum(
@@ -652,7 +652,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_tum(
@@ -661,7 +661,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_tum(
@@ -670,7 +670,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_tum(
@@ -679,7 +679,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_tum(
@@ -688,7 +688,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_tum(
@@ -697,7 +697,7 @@ vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_tum(
@@ -706,7 +706,7 @@ vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_tum(
@@ -715,7 +715,7 @@ vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_tum(
@@ -724,7 +724,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_tum(
@@ -733,7 +733,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_tum(
@@ -742,7 +742,7 @@ vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_tum(
@@ -751,7 +751,7 @@ vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_tum(
@@ -760,7 +760,7 @@ vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_tum(
@@ -769,7 +769,7 @@ vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_tum(
@@ -778,7 +778,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_tum(
@@ -787,7 +787,7 @@ vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_tum(
@@ -796,7 +796,7 @@ vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_tum(
@@ -805,7 +805,7 @@ vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_tum(
@@ -814,7 +814,7 @@ vint32m8_t test_vluxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_tum(
@@ -823,7 +823,7 @@ vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_tum(
@@ -832,7 +832,7 @@ vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_tum(
@@ -841,7 +841,7 @@ vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_tum(
@@ -850,7 +850,7 @@ vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_tum(
@@ -859,7 +859,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_tum(
@@ -868,7 +868,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_tum(
@@ -877,7 +877,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_tum(
@@ -886,7 +886,7 @@ vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_tum(
@@ -895,7 +895,7 @@ vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_tum(
@@ -904,7 +904,7 @@ vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_tum(
@@ -913,7 +913,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_tum(
@@ -922,7 +922,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_tum(
@@ -931,7 +931,7 @@ vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_tum(
@@ -940,7 +940,7 @@ vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_tum(
@@ -949,7 +949,7 @@ vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_tum(
@@ -958,7 +958,7 @@ vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_tum(
@@ -967,7 +967,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_tum(
@@ -976,7 +976,7 @@ vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_tum(
@@ -985,7 +985,7 @@ vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_tum(
@@ -994,7 +994,7 @@ vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_tum(
@@ -1003,7 +1003,7 @@ vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_tum(
@@ -1012,7 +1012,7 @@ vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_tum(
@@ -1021,7 +1021,7 @@ vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_tum(
@@ -1030,7 +1030,7 @@ vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_tumu(
@@ -1039,7 +1039,7 @@ vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_tumu(
@@ -1048,7 +1048,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_tumu(
@@ -1057,7 +1057,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_tumu(
@@ -1066,7 +1066,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_tumu(
@@ -1075,7 +1075,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_tumu(
@@ -1084,7 +1084,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_tumu(
@@ -1093,7 +1093,7 @@ vfloat16m8_t test_vluxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_tumu(
@@ -1102,7 +1102,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_tumu(
@@ -1111,7 +1111,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_tumu(
@@ -1120,7 +1120,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_tumu(
@@ -1129,7 +1129,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_tumu(
@@ -1138,7 +1138,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_tumu(
@@ -1147,7 +1147,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_tumu(
@@ -1156,7 +1156,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_tumu(
@@ -1165,7 +1165,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_tumu(
@@ -1174,7 +1174,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_tumu(
@@ -1183,7 +1183,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_tumu(
@@ -1192,7 +1192,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_tumu(
@@ -1201,7 +1201,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_tumu(
@@ -1210,7 +1210,7 @@ vint8m1_t test_vluxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_tumu(
@@ -1219,7 +1219,7 @@ vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_tumu(
@@ -1228,7 +1228,7 @@ vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_tumu(
@@ -1237,7 +1237,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_tumu(
@@ -1246,7 +1246,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_tumu(
@@ -1255,7 +1255,7 @@ vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_tumu(
@@ -1264,7 +1264,7 @@ vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_tumu(
@@ -1273,7 +1273,7 @@ vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_tumu(
@@ -1282,7 +1282,7 @@ vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_tumu(
@@ -1291,7 +1291,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_tumu(
@@ -1300,7 +1300,7 @@ vint32m1_t test_vluxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_tumu(
@@ -1309,7 +1309,7 @@ vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_tumu(
@@ -1318,7 +1318,7 @@ vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_tumu(
@@ -1327,7 +1327,7 @@ vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_tumu(
@@ -1336,7 +1336,7 @@ vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_tumu(
@@ -1345,7 +1345,7 @@ vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_tumu(
@@ -1354,7 +1354,7 @@ vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_tumu(
@@ -1363,7 +1363,7 @@ vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_tumu(
@@ -1372,7 +1372,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_tumu(
@@ -1381,7 +1381,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_tumu(
@@ -1390,7 +1390,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_tumu(
@@ -1399,7 +1399,7 @@ vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_tumu(
@@ -1408,7 +1408,7 @@ vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_tumu(
@@ -1417,7 +1417,7 @@ vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_tumu(
@@ -1426,7 +1426,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_tumu(
@@ -1435,7 +1435,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_tumu(
@@ -1444,7 +1444,7 @@ vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_tumu(
@@ -1453,7 +1453,7 @@ vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_tumu(
@@ -1462,7 +1462,7 @@ vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_tumu(
@@ -1471,7 +1471,7 @@ vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_tumu(
@@ -1480,7 +1480,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_tumu(
@@ -1489,7 +1489,7 @@ vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_tumu(
@@ -1498,7 +1498,7 @@ vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_tumu(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_tumu(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_tumu(
@@ -1525,7 +1525,7 @@ vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_tumu(
@@ -1534,7 +1534,7 @@ vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_tumu(
@@ -1543,7 +1543,7 @@ vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_mu(
@@ -1552,7 +1552,7 @@ vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_mu(
@@ -1561,7 +1561,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_mu(
@@ -1570,7 +1570,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_mu(
@@ -1579,7 +1579,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_mu(
@@ -1588,7 +1588,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_mu(
@@ -1597,7 +1597,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_mu(
@@ -1606,7 +1606,7 @@ vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_mu(
@@ -1615,7 +1615,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_mu(
@@ -1624,7 +1624,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_mu(
@@ -1633,7 +1633,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_mu(
@@ -1642,7 +1642,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_mu(
@@ -1651,7 +1651,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_mu(
@@ -1660,7 +1660,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_mu(
@@ -1669,7 +1669,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_mu(
@@ -1678,7 +1678,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_mu(
@@ -1687,7 +1687,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_mu(
@@ -1696,7 +1696,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_mu(
@@ -1705,7 +1705,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_mu(
@@ -1714,7 +1714,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_mu(
@@ -1723,7 +1723,7 @@ vint8m1_t test_vluxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_mu(
@@ -1732,7 +1732,7 @@ vint8m2_t test_vluxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_mu(
@@ -1741,7 +1741,7 @@ vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_mu(
@@ -1750,7 +1750,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_mu(
@@ -1759,7 +1759,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_mu(
@@ -1768,7 +1768,7 @@ vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_mu(
@@ -1777,7 +1777,7 @@ vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_mu(
@@ -1786,7 +1786,7 @@ vint16m4_t test_vluxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_mu(
@@ -1795,7 +1795,7 @@ vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_mu(
@@ -1804,7 +1804,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_mu(
@@ -1813,7 +1813,7 @@ vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_mu(
@@ -1822,7 +1822,7 @@ vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_mu(
@@ -1831,7 +1831,7 @@ vint32m4_t test_vluxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_mu(
@@ -1840,7 +1840,7 @@ vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_mu(
@@ -1849,7 +1849,7 @@ vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_mu(
@@ -1858,7 +1858,7 @@ vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_mu(
@@ -1867,7 +1867,7 @@ vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_mu(
@@ -1876,7 +1876,7 @@ vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_mu(
@@ -1885,7 +1885,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_mu(
@@ -1894,7 +1894,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_mu(
@@ -1903,7 +1903,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_mu(
@@ -1912,7 +1912,7 @@ vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_mu(
@@ -1921,7 +1921,7 @@ vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_mu(
@@ -1930,7 +1930,7 @@ vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_mu(
@@ -1939,7 +1939,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_mu(
@@ -1948,7 +1948,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_mu(
@@ -1957,7 +1957,7 @@ vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_mu(
@@ -1966,7 +1966,7 @@ vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_mu(
@@ -1975,7 +1975,7 @@ vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_mu(
@@ -1984,7 +1984,7 @@ vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_mu(
@@ -1993,7 +1993,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_mu(
@@ -2002,7 +2002,7 @@ vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_mu(
@@ -2011,7 +2011,7 @@ vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_mu(
@@ -2020,7 +2020,7 @@ vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_mu(
@@ -2029,7 +2029,7 @@ vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_mu(
@@ -2038,7 +2038,7 @@ vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_mu(
@@ -2047,7 +2047,7 @@ vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_mu(
@@ -2056,6 +2056,6 @@ vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei16_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
}
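
The hunks above complete the vluxei16.c rename: every overloaded policy intrinsic (_tu, _tum, _tumu, _mu) keeps its argument list unchanged and only gains the __riscv_ prefix. A minimal caller sketch, mirroring the test_vluxei16_v_i32m1_tum signature from the diff above; gather_i32 is a hypothetical helper, not part of the patch, and it assumes a toolchain whose <riscv_vector.h> already ships the prefixed overloads plus an rv64gcv target (with Zvfh where the _Float16 tests apply):

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// Hypothetical helper, not part of the patch: indexed gather of int32
// elements using 16-bit indices under the tail-undisturbed masked
// (_tum) policy.
vint32m1_t gather_i32(vbool32_t mask, vint32m1_t maskedoff,
                      const int32_t *base, vuint16mf2_t bindex, size_t vl) {
  // Identical arguments to the pre-rename overload; only the __riscv_
  // spelling is new. Under _tum, maskedoff supplies the undisturbed
  // tail elements.
  return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl);
}
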
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei32.c
index 89afece97c98..c12c7e1d8261 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei32.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_tu(
@@ -67,7 +67,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_tu(
@@ -76,7 +76,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_tu(
@@ -85,7 +85,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_tu(
@@ -94,7 +94,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_tu(
@@ -103,7 +103,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_tu(
@@ -112,7 +112,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_tu(
@@ -121,7 +121,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_tu(
@@ -130,7 +130,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_tu(
@@ -139,7 +139,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_tu(
@@ -148,7 +148,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_tu(
@@ -157,7 +157,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_tu(
@@ -166,7 +166,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_tu(
@@ -175,7 +175,7 @@ vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_tu(
@@ -184,7 +184,7 @@ vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_tu(
@@ -193,7 +193,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_tu(
@@ -202,7 +202,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_tu(
@@ -211,7 +211,7 @@ vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_tu(
@@ -220,7 +220,7 @@ vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_tu(
@@ -229,7 +229,7 @@ vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_tu(
@@ -238,7 +238,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_tu(
@@ -247,7 +247,7 @@ vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_tu(
@@ -256,7 +256,7 @@ vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_tu(
@@ -265,7 +265,7 @@ vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_tu(
@@ -274,7 +274,7 @@ vint32m8_t test_vluxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_tu(
@@ -283,7 +283,7 @@ vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_tu(
@@ -292,7 +292,7 @@ vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_tu(
@@ -301,7 +301,7 @@ vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_tu(
@@ -310,7 +310,7 @@ vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_tu(
@@ -319,7 +319,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_tu(
@@ -328,7 +328,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_tu(
@@ -337,7 +337,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_tu(
@@ -346,7 +346,7 @@ vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_tu(
@@ -355,7 +355,7 @@ vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_tu(
@@ -364,7 +364,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_tu(
@@ -373,7 +373,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_tu(
@@ -382,7 +382,7 @@ vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_tu(
@@ -391,7 +391,7 @@ vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_tu(
@@ -400,7 +400,7 @@ vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_tu(
@@ -409,7 +409,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_tu(
@@ -418,7 +418,7 @@ vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_tu(
@@ -427,7 +427,7 @@ vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_tu(
@@ -436,7 +436,7 @@ vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_tu(
@@ -445,7 +445,7 @@ vuint32m8_t test_vluxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_tu(
@@ -454,7 +454,7 @@ vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_tu(
@@ -463,7 +463,7 @@ vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_tu(
@@ -472,7 +472,7 @@ vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_tum(
@@ -481,7 +481,7 @@ vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_tum(
@@ -490,7 +490,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_tum(
@@ -499,7 +499,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_tum(
@@ -508,7 +508,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_tum(
@@ -517,7 +517,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_tum(
@@ -526,7 +526,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_tum(
@@ -535,7 +535,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_tum(
@@ -544,7 +544,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_tum(
@@ -553,7 +553,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_tum(
@@ -562,7 +562,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_tum(
@@ -571,7 +571,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_tum(
@@ -580,7 +580,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_tum(
@@ -589,7 +589,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_tum(
@@ -598,7 +598,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_tum(
@@ -607,7 +607,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_tum(
@@ -616,7 +616,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_tum(
@@ -625,7 +625,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_tum(
@@ -634,7 +634,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_tum(
@@ -643,7 +643,7 @@ vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_tum(
@@ -652,7 +652,7 @@ vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_tum(
@@ -661,7 +661,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_tum(
@@ -670,7 +670,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_tum(
@@ -679,7 +679,7 @@ vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_tum(
@@ -688,7 +688,7 @@ vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_tum(
@@ -697,7 +697,7 @@ vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_tum(
@@ -706,7 +706,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_tum(
@@ -715,7 +715,7 @@ vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_tum(
@@ -724,7 +724,7 @@ vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_tum(
@@ -733,7 +733,7 @@ vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_tum(
@@ -742,7 +742,7 @@ vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_tum(
@@ -751,7 +751,7 @@ vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_tum(
@@ -760,7 +760,7 @@ vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_tum(
@@ -769,7 +769,7 @@ vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_tum(
@@ -778,7 +778,7 @@ vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_tum(
@@ -787,7 +787,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_tum(
@@ -796,7 +796,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_tum(
@@ -805,7 +805,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_tum(
@@ -814,7 +814,7 @@ vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_tum(
@@ -823,7 +823,7 @@ vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_tum(
@@ -832,7 +832,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_tum(
@@ -841,7 +841,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_tum(
@@ -850,7 +850,7 @@ vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_tum(
@@ -859,7 +859,7 @@ vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_tum(
@@ -868,7 +868,7 @@ vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_tum(
@@ -877,7 +877,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_tum(
@@ -886,7 +886,7 @@ vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_tum(
@@ -895,7 +895,7 @@ vuint32m2_t test_vluxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_tum(
@@ -904,7 +904,7 @@ vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_tum(
@@ -913,7 +913,7 @@ vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_tum(
@@ -922,7 +922,7 @@ vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_tum(
@@ -931,7 +931,7 @@ vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_tum(
@@ -940,7 +940,7 @@ vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_tumu(
@@ -949,7 +949,7 @@ vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_tumu(
@@ -958,7 +958,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_tumu(
@@ -967,7 +967,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_tumu(
@@ -976,7 +976,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_tumu(
@@ -985,7 +985,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_tumu(
@@ -994,7 +994,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_tumu(
@@ -1003,7 +1003,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_tumu(
@@ -1012,7 +1012,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_tumu(
@@ -1021,7 +1021,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_tumu(
@@ -1030,7 +1030,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_tumu(
@@ -1039,7 +1039,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_tumu(
@@ -1048,7 +1048,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_tumu(
@@ -1057,7 +1057,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_tumu(
@@ -1066,7 +1066,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_tumu(
@@ -1075,7 +1075,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_tumu(
@@ -1084,7 +1084,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_tumu(
@@ -1093,7 +1093,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_tumu(
@@ -1102,7 +1102,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_tumu(
@@ -1111,7 +1111,7 @@ vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_tumu(
@@ -1120,7 +1120,7 @@ vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_tumu(
@@ -1129,7 +1129,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_tumu(
@@ -1138,7 +1138,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_tumu(
@@ -1147,7 +1147,7 @@ vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_tumu(
@@ -1156,7 +1156,7 @@ vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_tumu(
@@ -1165,7 +1165,7 @@ vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_tumu(
@@ -1174,7 +1174,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_tumu(
@@ -1183,7 +1183,7 @@ vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_tumu(
@@ -1192,7 +1192,7 @@ vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_tumu(
@@ -1201,7 +1201,7 @@ vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_tumu(
@@ -1210,7 +1210,7 @@ vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_tumu(
@@ -1219,7 +1219,7 @@ vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_tumu(
@@ -1228,7 +1228,7 @@ vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_tumu(
@@ -1237,7 +1237,7 @@ vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_tumu(
@@ -1246,7 +1246,7 @@ vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_tumu(
@@ -1255,7 +1255,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_tumu(
@@ -1264,7 +1264,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_tumu(
@@ -1273,7 +1273,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_tumu(
@@ -1282,7 +1282,7 @@ vuint8m1_t test_vluxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_tumu(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vluxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_tumu(
@@ -1300,7 +1300,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_tumu(
@@ -1309,7 +1309,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_tumu(
@@ -1318,7 +1318,7 @@ vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_tumu(
@@ -1327,7 +1327,7 @@ vuint16m2_t test_vluxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_tumu(
@@ -1336,7 +1336,7 @@ vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_tumu(
@@ -1345,7 +1345,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_tumu(
@@ -1354,7 +1354,7 @@ vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_tumu(
@@ -1363,7 +1363,7 @@ vuint32m2_t test_vluxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_tumu(
@@ -1372,7 +1372,7 @@ vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_tumu(
@@ -1381,7 +1381,7 @@ vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_tumu(
@@ -1390,7 +1390,7 @@ vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_tumu(
@@ -1399,7 +1399,7 @@ vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_tumu(
@@ -1408,7 +1408,7 @@ vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_mu(
@@ -1417,7 +1417,7 @@ vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_mu(
@@ -1426,7 +1426,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_mu(
@@ -1435,7 +1435,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_mu(
@@ -1444,7 +1444,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_mu(
@@ -1453,7 +1453,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_mu(
@@ -1462,7 +1462,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_mu(
@@ -1471,7 +1471,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_mu(
@@ -1480,7 +1480,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_mu(
@@ -1489,7 +1489,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_mu(
@@ -1498,7 +1498,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_mu(
@@ -1507,7 +1507,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_mu(
@@ -1516,7 +1516,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_mu(
@@ -1525,7 +1525,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_mu(
@@ -1534,7 +1534,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_mu(
@@ -1543,7 +1543,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_mu(
@@ -1552,7 +1552,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_mu(
@@ -1561,7 +1561,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_mu(
@@ -1570,7 +1570,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_mu(
@@ -1579,7 +1579,7 @@ vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_mu(
@@ -1588,7 +1588,7 @@ vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_mu(
@@ -1597,7 +1597,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_mu(
@@ -1606,7 +1606,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_mu(
@@ -1615,7 +1615,7 @@ vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_mu(
@@ -1624,7 +1624,7 @@ vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_mu(
@@ -1633,7 +1633,7 @@ vint16m4_t test_vluxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_mu(
@@ -1642,7 +1642,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_mu(
@@ -1651,7 +1651,7 @@ vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_mu(
@@ -1660,7 +1660,7 @@ vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_mu(
@@ -1669,7 +1669,7 @@ vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_mu(
@@ -1678,7 +1678,7 @@ vint32m8_t test_vluxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_mu(
@@ -1687,7 +1687,7 @@ vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_mu(
@@ -1696,7 +1696,7 @@ vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_mu(
@@ -1705,7 +1705,7 @@ vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_mu(
@@ -1714,7 +1714,7 @@ vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_mu(
@@ -1723,7 +1723,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_mu(
@@ -1732,7 +1732,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_mu(
@@ -1741,7 +1741,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_mu(
@@ -1750,7 +1750,7 @@ vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_mu(
@@ -1759,7 +1759,7 @@ vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_mu(
@@ -1768,7 +1768,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_mu(
@@ -1777,7 +1777,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_mu(
@@ -1786,7 +1786,7 @@ vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_mu(
@@ -1795,7 +1795,7 @@ vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_mu(
@@ -1804,7 +1804,7 @@ vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_mu(
@@ -1813,7 +1813,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_mu(
@@ -1822,7 +1822,7 @@ vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_mu(
@@ -1831,7 +1831,7 @@ vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_mu(
@@ -1840,7 +1840,7 @@ vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_mu(
@@ -1849,7 +1849,7 @@ vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_mu(
@@ -1858,7 +1858,7 @@ vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_mu(
@@ -1867,7 +1867,7 @@ vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_mu(
@@ -1876,6 +1876,6 @@ vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei32_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei64.c
index 7ce61fe796b3..2145bc8a99da 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei64.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_tu(
@@ -58,7 +58,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_tu(
@@ -67,7 +67,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_tu(
@@ -76,7 +76,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_tu(
@@ -85,7 +85,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_tu(
@@ -94,7 +94,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_tu(
@@ -103,7 +103,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_tu(
@@ -112,7 +112,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_tu(
@@ -121,7 +121,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_tu(
@@ -130,7 +130,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_tu(
@@ -139,7 +139,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_tu(
@@ -148,7 +148,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_tu(
@@ -157,7 +157,7 @@ vint8m1_t test_vluxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_tu(
@@ -166,7 +166,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_tu(
@@ -175,7 +175,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_tu(
@@ -184,7 +184,7 @@ vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_tu(
@@ -193,7 +193,7 @@ vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_tu(
@@ -202,7 +202,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_tu(
@@ -211,7 +211,7 @@ vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_tu(
@@ -220,7 +220,7 @@ vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_tu(
@@ -229,7 +229,7 @@ vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_tu(
@@ -238,7 +238,7 @@ vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_tu(
@@ -247,7 +247,7 @@ vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_tu(
@@ -256,7 +256,7 @@ vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_tu(
@@ -265,7 +265,7 @@ vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_tu(
@@ -274,7 +274,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_tu(
@@ -283,7 +283,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_tu(
@@ -292,7 +292,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_tu(
@@ -301,7 +301,7 @@ vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_tu(
@@ -310,7 +310,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_tu(
@@ -319,7 +319,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_tu(
@@ -328,7 +328,7 @@ vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_tu(
@@ -337,7 +337,7 @@ vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_tu(
@@ -346,7 +346,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_tu(
@@ -355,7 +355,7 @@ vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_tu(
@@ -364,7 +364,7 @@ vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_tu(
@@ -373,7 +373,7 @@ vuint32m4_t test_vluxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_tu(
@@ -382,7 +382,7 @@ vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_tu(
@@ -391,7 +391,7 @@ vuint64m2_t test_vluxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_tu(
@@ -400,7 +400,7 @@ vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_tum(
@@ -409,7 +409,7 @@ vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_tum(
@@ -418,7 +418,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_tum(
@@ -427,7 +427,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_tum(
@@ -436,7 +436,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_tum(
@@ -445,7 +445,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_tum(
@@ -454,7 +454,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_tum(
@@ -463,7 +463,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_tum(
@@ -472,7 +472,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_tum(
@@ -481,7 +481,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_tum(
@@ -490,7 +490,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_tum(
@@ -499,7 +499,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_tum(
@@ -508,7 +508,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_tum(
@@ -517,7 +517,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_tum(
@@ -526,7 +526,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_tum(
@@ -535,7 +535,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_tum(
@@ -544,7 +544,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_tum(
@@ -553,7 +553,7 @@ vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_tum(
@@ -562,7 +562,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_tum(
@@ -571,7 +571,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_tum(
@@ -580,7 +580,7 @@ vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_tum(
@@ -589,7 +589,7 @@ vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_tum(
@@ -598,7 +598,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_tum(
@@ -607,7 +607,7 @@ vint32m1_t test_vluxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_tum(
@@ -616,7 +616,7 @@ vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_tum(
@@ -625,7 +625,7 @@ vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_tum(
@@ -634,7 +634,7 @@ vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_tum(
@@ -643,7 +643,7 @@ vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_tum(
@@ -652,7 +652,7 @@ vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_tum(
@@ -661,7 +661,7 @@ vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_tum(
@@ -670,7 +670,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_tum(
@@ -679,7 +679,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_tum(
@@ -688,7 +688,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_tum(
@@ -697,7 +697,7 @@ vuint8m1_t test_vluxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_tum(
@@ -706,7 +706,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_tum(
@@ -715,7 +715,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_tum(
@@ -724,7 +724,7 @@ vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_tum(
@@ -733,7 +733,7 @@ vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_tum(
@@ -742,7 +742,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_tum(
@@ -751,7 +751,7 @@ vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_tum(
@@ -760,7 +760,7 @@ vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_tum(
@@ -769,7 +769,7 @@ vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_tum(
@@ -778,7 +778,7 @@ vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_tum(
@@ -787,7 +787,7 @@ vuint64m2_t test_vluxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_tum(
@@ -796,7 +796,7 @@ vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_tumu(
@@ -805,7 +805,7 @@ vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_tumu(
@@ -814,7 +814,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_tumu(
@@ -823,7 +823,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_tumu(
@@ -832,7 +832,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_tumu(
@@ -841,7 +841,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_tumu(
@@ -850,7 +850,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_tumu(
@@ -859,7 +859,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_tumu(
@@ -868,7 +868,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_tumu(
@@ -877,7 +877,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_tumu(
@@ -886,7 +886,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_tumu(
@@ -895,7 +895,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_tumu(
@@ -904,7 +904,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_tumu(
@@ -913,7 +913,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_tumu(
@@ -922,7 +922,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_tumu(
@@ -931,7 +931,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_tumu(
@@ -940,7 +940,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_tumu(
@@ -949,7 +949,7 @@ vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_tumu(
@@ -958,7 +958,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_tumu(
@@ -967,7 +967,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_tumu(
@@ -976,7 +976,7 @@ vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_tumu(
@@ -985,7 +985,7 @@ vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_tumu(
@@ -994,7 +994,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_tumu(
@@ -1003,7 +1003,7 @@ vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_tumu(
@@ -1012,7 +1012,7 @@ vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_tumu(
@@ -1021,7 +1021,7 @@ vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_tumu(
@@ -1030,7 +1030,7 @@ vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_tumu(
@@ -1039,7 +1039,7 @@ vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_tumu(
@@ -1048,7 +1048,7 @@ vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_tumu(
@@ -1057,7 +1057,7 @@ vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_tumu(
@@ -1066,7 +1066,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_tumu(
@@ -1075,7 +1075,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_tumu(
@@ -1084,7 +1084,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_tumu(
@@ -1093,7 +1093,7 @@ vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_tumu(
@@ -1102,7 +1102,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_tumu(
@@ -1111,7 +1111,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_tumu(
@@ -1120,7 +1120,7 @@ vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_tumu(
@@ -1129,7 +1129,7 @@ vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_tumu(
@@ -1138,7 +1138,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_tumu(
@@ -1147,7 +1147,7 @@ vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_tumu(
@@ -1156,7 +1156,7 @@ vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_tumu(
@@ -1165,7 +1165,7 @@ vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_tumu(
@@ -1174,7 +1174,7 @@ vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_tumu(
@@ -1183,7 +1183,7 @@ vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_tumu(
@@ -1192,7 +1192,7 @@ vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_mu(
@@ -1201,7 +1201,7 @@ vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_mu(
@@ -1210,7 +1210,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_mu(
@@ -1219,7 +1219,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_mu(
@@ -1228,7 +1228,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_mu(
@@ -1237,7 +1237,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_mu(
@@ -1246,7 +1246,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_mu(
@@ -1255,7 +1255,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_mu(
@@ -1264,7 +1264,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_mu(
@@ -1273,7 +1273,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_mu(
@@ -1282,7 +1282,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_mu(
@@ -1291,7 +1291,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_mu(
@@ -1300,7 +1300,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_mu(
@@ -1309,7 +1309,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_mu(
@@ -1318,7 +1318,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_mu(
@@ -1327,7 +1327,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_mu(
@@ -1336,7 +1336,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_mu(
@@ -1345,7 +1345,7 @@ vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_mu(
@@ -1354,7 +1354,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_mu(
@@ -1363,7 +1363,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_mu(
@@ -1372,7 +1372,7 @@ vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_mu(
@@ -1381,7 +1381,7 @@ vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_mu(
@@ -1390,7 +1390,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_mu(
@@ -1399,7 +1399,7 @@ vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_mu(
@@ -1408,7 +1408,7 @@ vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_mu(
@@ -1417,7 +1417,7 @@ vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_mu(
@@ -1426,7 +1426,7 @@ vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_mu(
@@ -1435,7 +1435,7 @@ vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_mu(
@@ -1444,7 +1444,7 @@ vint64m4_t test_vluxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_mu(
@@ -1453,7 +1453,7 @@ vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_mu(
@@ -1462,7 +1462,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_mu(
@@ -1471,7 +1471,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_mu(
@@ -1480,7 +1480,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_mu(
@@ -1489,7 +1489,7 @@ vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_mu(
@@ -1498,7 +1498,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_mu(
@@ -1507,7 +1507,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_mu(
@@ -1516,7 +1516,7 @@ vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_mu(
@@ -1525,7 +1525,7 @@ vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_mu(
@@ -1534,7 +1534,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_mu(
@@ -1543,7 +1543,7 @@ vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_mu(
@@ -1552,7 +1552,7 @@ vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_mu(
@@ -1561,7 +1561,7 @@ vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_mu(
@@ -1570,7 +1570,7 @@ vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_mu(
@@ -1579,7 +1579,7 @@ vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_mu(
@@ -1588,6 +1588,6 @@ vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
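
Every hunk in these autogenerated test files follows the same mechanical pattern: the overloaded RVV intrinsic call gains the `__riscv_` prefix while its argument list stays untouched. As a minimal sketch of what the rename means for user code (the wrapper function `gather_u32_masked` is illustrative only and not part of this patch; the intrinsic spelling and signature are taken verbatim from the `vluxei64_mu` tests above):

```c
#include <riscv_vector.h>  // RVV intrinsic types and declarations

// Masked indexed unordered load, "mask undisturbed" (_mu) policy variant.
// Signature mirrors test_vluxei64_v_u32m1_mu from the diff above.
vuint32m1_t gather_u32_masked(vbool32_t mask, vuint32m1_t maskedoff,
                              const uint32_t *base, vuint64m2_t bindex,
                              size_t vl) {
  // Old overloaded spelling: vluxei64_mu(mask, maskedoff, base, bindex, vl);
  // New spelling with the __riscv_ prefix mandated by riscv-c-api-doc:
  return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl);
}
```

Building such code assumes a toolchain that already carries this patch-set and has the V extension enabled; older headers only accept the unprefixed spelling.
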
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei8.c
index c9d924446e53..7957cc641aee 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxei8.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vluxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *bas
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *bas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_tu(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_tu(
@@ -157,7 +157,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_tu(
@@ -166,7 +166,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_tu(
@@ -175,7 +175,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_tu(
@@ -184,7 +184,7 @@ vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_tu(
@@ -193,7 +193,7 @@ vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_tu(
@@ -202,7 +202,7 @@ vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_tu(
@@ -211,7 +211,7 @@ vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_tu(
@@ -220,7 +220,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_tu(
@@ -229,7 +229,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_tu(
@@ -238,7 +238,7 @@ vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_tu(
@@ -247,7 +247,7 @@ vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_tu(
@@ -256,7 +256,7 @@ vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_tu(
@@ -265,7 +265,7 @@ vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_tu(
@@ -274,7 +274,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_tu(
@@ -301,7 +301,7 @@ vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_tu(
@@ -310,7 +310,7 @@ vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_tu(
@@ -319,7 +319,7 @@ vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_tu(
@@ -328,7 +328,7 @@ vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_tu(
@@ -337,7 +337,7 @@ vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_tu(
@@ -346,7 +346,7 @@ vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_tu(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_tu(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_tu(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_tu(
@@ -382,7 +382,7 @@ vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_tu(
@@ -391,7 +391,7 @@ vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_tu(
@@ -400,7 +400,7 @@ vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_tu(
@@ -409,7 +409,7 @@ vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_tu(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_tu(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *ba
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_tu(
@@ -436,7 +436,7 @@ vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_tu(
@@ -445,7 +445,7 @@ vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_tu(
@@ -454,7 +454,7 @@ vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_tu(
@@ -463,7 +463,7 @@ vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_tu(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *ba
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_tu(
@@ -481,7 +481,7 @@ vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_tu(
@@ -490,7 +490,7 @@ vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_tu(
@@ -499,7 +499,7 @@ vuint32m4_t test_vluxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_tu(
@@ -508,7 +508,7 @@ vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_tu(
@@ -517,7 +517,7 @@ vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_tu(
@@ -526,7 +526,7 @@ vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_tu(
@@ -535,7 +535,7 @@ vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tu(maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tu(maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_tum(
@@ -544,7 +544,7 @@ vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_tum(
@@ -553,7 +553,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_tum(
@@ -562,7 +562,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_tum(
@@ -571,7 +571,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_tum(
@@ -580,7 +580,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_tum(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_tum(
@@ -598,7 +598,7 @@ vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_tum(
@@ -607,7 +607,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_tum(
@@ -616,7 +616,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_tum(
@@ -625,7 +625,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_tum(
@@ -634,7 +634,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_tum(
@@ -643,7 +643,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_tum(
@@ -652,7 +652,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_tum(
@@ -661,7 +661,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_tum(
@@ -670,7 +670,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_tum(
@@ -679,7 +679,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_tum(
@@ -688,7 +688,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_tum(
@@ -697,7 +697,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_tum(
@@ -706,7 +706,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_tum(
@@ -715,7 +715,7 @@ vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_tum(
@@ -724,7 +724,7 @@ vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_tum(
@@ -733,7 +733,7 @@ vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_tum(
@@ -742,7 +742,7 @@ vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_tum(
@@ -751,7 +751,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_tum(
@@ -760,7 +760,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_tum(
@@ -769,7 +769,7 @@ vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_tum(
@@ -778,7 +778,7 @@ vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_tum(
@@ -787,7 +787,7 @@ vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_tum(
@@ -796,7 +796,7 @@ vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_tum(
@@ -805,7 +805,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_tum(
@@ -814,7 +814,7 @@ vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_tum(
@@ -823,7 +823,7 @@ vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_tum(
@@ -832,7 +832,7 @@ vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_tum(
@@ -841,7 +841,7 @@ vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_tum(
@@ -850,7 +850,7 @@ vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_tum(
@@ -859,7 +859,7 @@ vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_tum(
@@ -868,7 +868,7 @@ vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_tum(
@@ -877,7 +877,7 @@ vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_tum(
@@ -886,7 +886,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_tum(
@@ -895,7 +895,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_tum(
@@ -904,7 +904,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_tum(
@@ -913,7 +913,7 @@ vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_tum(
@@ -922,7 +922,7 @@ vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_tum(
@@ -931,7 +931,7 @@ vuint8m4_t test_vluxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_tum(
@@ -940,7 +940,7 @@ vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const ui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_tum(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_tum(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_tum(
@@ -967,7 +967,7 @@ vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_tum(
@@ -976,7 +976,7 @@ vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_tum(
@@ -985,7 +985,7 @@ vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_tum(
@@ -994,7 +994,7 @@ vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_tum(
@@ -1003,7 +1003,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_tum(
@@ -1012,7 +1012,7 @@ vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_tum(
@@ -1021,7 +1021,7 @@ vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_tum(
@@ -1030,7 +1030,7 @@ vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_tum(
@@ -1039,7 +1039,7 @@ vuint32m8_t test_vluxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_tum(
@@ -1048,7 +1048,7 @@ vuint64m1_t test_vluxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_tum(
@@ -1057,7 +1057,7 @@ vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_tum(
@@ -1066,7 +1066,7 @@ vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tum(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_tumu(
@@ -1075,7 +1075,7 @@ vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_tumu(
@@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_tumu(
@@ -1093,7 +1093,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_tumu(
@@ -1102,7 +1102,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_tumu(
@@ -1111,7 +1111,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_tumu(
@@ -1120,7 +1120,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_tumu(
@@ -1129,7 +1129,7 @@ vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_tumu(
@@ -1138,7 +1138,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_tumu(
@@ -1147,7 +1147,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_tumu(
@@ -1156,7 +1156,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_tumu(
@@ -1165,7 +1165,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_tumu(
@@ -1174,7 +1174,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_tumu(
@@ -1183,7 +1183,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_tumu(
@@ -1192,7 +1192,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_tumu(
@@ -1201,7 +1201,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, c
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_tumu(
@@ -1210,7 +1210,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_tumu(
@@ -1219,7 +1219,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_tumu(
@@ -1228,7 +1228,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_tumu(
@@ -1237,7 +1237,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_tumu(
@@ -1246,7 +1246,7 @@ vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_tumu(
@@ -1255,7 +1255,7 @@ vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_tumu(
@@ -1264,7 +1264,7 @@ vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_tumu(
@@ -1273,7 +1273,7 @@ vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_tumu(
@@ -1282,7 +1282,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_tumu(
@@ -1291,7 +1291,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_tumu(
@@ -1300,7 +1300,7 @@ vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_tumu(
@@ -1309,7 +1309,7 @@ vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_tumu(
@@ -1318,7 +1318,7 @@ vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_tumu(
@@ -1327,7 +1327,7 @@ vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_tumu(
@@ -1336,7 +1336,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_tumu(
@@ -1345,7 +1345,7 @@ vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_tumu(
@@ -1354,7 +1354,7 @@ vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_tumu(
@@ -1363,7 +1363,7 @@ vint32m4_t test_vluxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_tumu(
@@ -1372,7 +1372,7 @@ vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_tumu(
@@ -1381,7 +1381,7 @@ vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_tumu(
@@ -1390,7 +1390,7 @@ vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_tumu(
@@ -1399,7 +1399,7 @@ vint64m4_t test_vluxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_tumu(
@@ -1408,7 +1408,7 @@ vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_tumu(
@@ -1417,7 +1417,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_tumu(
@@ -1426,7 +1426,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_tumu(
@@ -1435,7 +1435,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_tumu(
@@ -1444,7 +1444,7 @@ vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_tumu(
@@ -1453,7 +1453,7 @@ vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_tumu(
@@ -1462,7 +1462,7 @@ vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_tumu(
@@ -1471,7 +1471,7 @@ vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_tumu(
@@ -1480,7 +1480,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_tumu(
@@ -1489,7 +1489,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_tumu(
@@ -1498,7 +1498,7 @@ vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_tumu(
@@ -1507,7 +1507,7 @@ vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_tumu(
@@ -1516,7 +1516,7 @@ vuint16m4_t test_vluxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_tumu(
@@ -1525,7 +1525,7 @@ vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_tumu(
@@ -1534,7 +1534,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_tumu(
@@ -1543,7 +1543,7 @@ vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_tumu(
@@ -1552,7 +1552,7 @@ vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_tumu(
@@ -1561,7 +1561,7 @@ vuint32m4_t test_vluxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_tumu(
@@ -1570,7 +1570,7 @@ vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_tumu(
@@ -1579,7 +1579,7 @@ vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_tumu(
@@ -1588,7 +1588,7 @@ vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_tumu(
@@ -1597,7 +1597,7 @@ vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_mu(
@@ -1606,7 +1606,7 @@ vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_mu(
@@ -1615,7 +1615,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_mu(
@@ -1624,7 +1624,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_mu(
@@ -1633,7 +1633,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_mu(
@@ -1642,7 +1642,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_mu(
@@ -1651,7 +1651,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_mu(
@@ -1660,7 +1660,7 @@ vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_mu(
@@ -1669,7 +1669,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vluxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_mu(
@@ -1678,7 +1678,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_mu(
@@ -1687,7 +1687,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_mu(
@@ -1696,7 +1696,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_mu(
@@ -1705,7 +1705,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_mu(
@@ -1714,7 +1714,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_mu(
@@ -1723,7 +1723,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_mu(
@@ -1732,7 +1732,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, con
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_mu(
@@ -1741,7 +1741,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_mu(
@@ -1750,7 +1750,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_mu(
@@ -1759,7 +1759,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_mu(
@@ -1768,7 +1768,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_mu(
@@ -1777,7 +1777,7 @@ vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_mu(
@@ -1786,7 +1786,7 @@ vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_mu(
@@ -1795,7 +1795,7 @@ vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_mu(
@@ -1804,7 +1804,7 @@ vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_mu(
@@ -1813,7 +1813,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_mu(
@@ -1822,7 +1822,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vluxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_mu(
@@ -1831,7 +1831,7 @@ vint16m1_t test_vluxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_mu(
@@ -1840,7 +1840,7 @@ vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_mu(
@@ -1849,7 +1849,7 @@ vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_mu(
@@ -1858,7 +1858,7 @@ vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_mu(
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, cons
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_mu(
@@ -1876,7 +1876,7 @@ vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_mu(
@@ -1885,7 +1885,7 @@ vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_mu(
@@ -1894,7 +1894,7 @@ vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_mu(
@@ -1903,7 +1903,7 @@ vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_mu(
@@ -1912,7 +1912,7 @@ vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_mu(
@@ -1921,7 +1921,7 @@ vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_mu(
@@ -1930,7 +1930,7 @@ vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const i
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_mu(
@@ -1939,7 +1939,7 @@ vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const in
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_mu(
@@ -1948,7 +1948,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vluxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_mu(
@@ -1957,7 +1957,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_mu(
@@ -1966,7 +1966,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_mu(
@@ -1975,7 +1975,7 @@ vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_mu(
@@ -1984,7 +1984,7 @@ vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_mu(
@@ -1993,7 +1993,7 @@ vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_mu(
@@ -2002,7 +2002,7 @@ vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_mu(
@@ -2011,7 +2011,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_mu(
@@ -2020,7 +2020,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_mu(
@@ -2029,7 +2029,7 @@ vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_mu(
@@ -2038,7 +2038,7 @@ vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_mu(
@@ -2047,7 +2047,7 @@ vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_mu(
@@ -2056,7 +2056,7 @@ vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_mu(
@@ -2065,7 +2065,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, co
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_mu(
@@ -2074,7 +2074,7 @@ vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_mu(
@@ -2083,7 +2083,7 @@ vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_mu(
@@ -2092,7 +2092,7 @@ vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_mu(
@@ -2101,7 +2101,7 @@ vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_mu(
@@ -2110,7 +2110,7 @@ vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_mu(
@@ -2119,7 +2119,7 @@ vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_mu(
@@ -2128,6 +2128,6 @@ vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vluxei8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_mu(mask, maskedoff, base, bindex, vl);
+ return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl);
}
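// Illustrative usage sketch (not part of the patch): the hunks above rename the
// overloaded policy intrinsics so that a call previously spelled
// `vluxei8_tumu(...)` is now spelled `__riscv_vluxei8_tumu(...)`, with the
// argument list unchanged. A minimal caller, assuming the standard RVV
// intrinsics header <riscv_vector.h>, would migrate like this:
//
//   #include <riscv_vector.h>
//
//   vint32m1_t gather_i32(vbool32_t mask, vint32m1_t maskedoff,
//                         const int32_t *base, vuint8mf4_t bindex, size_t vl) {
//     // before this patch: return vluxei8_tumu(mask, maskedoff, base, bindex, vl);
//     return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl);
//   }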
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c
index 9a83bfc908ad..3a422216db72 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vluxseg2ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vluxseg2ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vluxseg2ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_tu(
@@ -69,7 +69,7 @@ void test_vluxseg2ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_tu(
@@ -82,7 +82,7 @@ void test_vluxseg2ei16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_tu(
@@ -95,7 +95,7 @@ void test_vluxseg2ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_tu(
@@ -108,7 +108,7 @@ void test_vluxseg2ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_tu(
@@ -121,7 +121,7 @@ void test_vluxseg2ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_tu(
@@ -134,7 +134,7 @@ void test_vluxseg2ei16_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_tu(
@@ -147,7 +147,7 @@ void test_vluxseg2ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_tu(
@@ -160,7 +160,7 @@ void test_vluxseg2ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_tu(
@@ -173,7 +173,7 @@ void test_vluxseg2ei16_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_tu(
@@ -186,7 +186,7 @@ void test_vluxseg2ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_tu(
@@ -199,7 +199,7 @@ void test_vluxseg2ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_tu(
@@ -212,7 +212,7 @@ void test_vluxseg2ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_tu(
@@ -225,7 +225,7 @@ void test_vluxseg2ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_tu(
@@ -238,7 +238,7 @@ void test_vluxseg2ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_tu(
@@ -251,7 +251,7 @@ void test_vluxseg2ei16_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_tu(
@@ -264,7 +264,7 @@ void test_vluxseg2ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vluxseg2ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_tu(
@@ -290,7 +290,7 @@ void test_vluxseg2ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_tu(
@@ -303,7 +303,7 @@ void test_vluxseg2ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_tu(
@@ -316,7 +316,7 @@ void test_vluxseg2ei16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_tu(
@@ -329,7 +329,7 @@ void test_vluxseg2ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_tu(
@@ -342,7 +342,7 @@ void test_vluxseg2ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_tu(
@@ -355,7 +355,7 @@ void test_vluxseg2ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_tu(
@@ -368,7 +368,7 @@ void test_vluxseg2ei16_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_tu(
@@ -381,7 +381,7 @@ void test_vluxseg2ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_tu(
@@ -394,7 +394,7 @@ void test_vluxseg2ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_tu(
@@ -407,7 +407,7 @@ void test_vluxseg2ei16_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_tu(
@@ -420,7 +420,7 @@ void test_vluxseg2ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_tu(
@@ -433,7 +433,7 @@ void test_vluxseg2ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_tu(
@@ -446,7 +446,7 @@ void test_vluxseg2ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_tu(
@@ -459,7 +459,7 @@ void test_vluxseg2ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_tu(
@@ -472,7 +472,7 @@ void test_vluxseg2ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_tu(
@@ -485,7 +485,7 @@ void test_vluxseg2ei16_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_tu(
@@ -498,7 +498,7 @@ void test_vluxseg2ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_tu(
@@ -511,7 +511,7 @@ void test_vluxseg2ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_tu(
@@ -524,7 +524,7 @@ void test_vluxseg2ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_tu(
@@ -537,7 +537,7 @@ void test_vluxseg2ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_tu(
@@ -550,7 +550,7 @@ void test_vluxseg2ei16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_tu(
@@ -563,7 +563,7 @@ void test_vluxseg2ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_tu(
@@ -576,7 +576,7 @@ void test_vluxseg2ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_tu(
@@ -589,7 +589,7 @@ void test_vluxseg2ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vluxseg2ei16_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_tu(
@@ -615,7 +615,7 @@ void test_vluxseg2ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_tu(
@@ -628,7 +628,7 @@ void test_vluxseg2ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_tum(
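//
// Editorial usage sketch (not part of the autogenerated tests): with this
// rename, user code reaches the overloaded tail-undisturbed (_tu) segment
// load through the __riscv_-prefixed spelling exercised in the hunks above.
// The wrapper name `load_pair_tu` is hypothetical; the intrinsic call and
// its argument order are taken verbatim from the tests.
//
//   #include <stddef.h>
//   #include <riscv_vector.h>
//
//   void load_pair_tu(vfloat32m1_t *v0, vfloat32m1_t *v1,
//                     vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1,
//                     const float *base, vuint16mf2_t bindex, size_t vl) {
//     // Overload resolution selects the f32m1 variant from the operand
//     // types; _tu requests the tail-undisturbed policy, so tail elements
//     // keep the passthrough (maskedoff) values.
//     __riscv_vluxseg2ei16_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
//   }
//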
@@ -641,7 +641,7 @@ void test_vluxseg2ei16_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_tum(
@@ -654,7 +654,7 @@ void test_vluxseg2ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_tum(
@@ -667,7 +667,7 @@ void test_vluxseg2ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_tum(
@@ -680,7 +680,7 @@ void test_vluxseg2ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_tum(
@@ -693,7 +693,7 @@ void test_vluxseg2ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_tum(
@@ -706,7 +706,7 @@ void test_vluxseg2ei16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_tum(
@@ -719,7 +719,7 @@ void test_vluxseg2ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_tum(
@@ -732,7 +732,7 @@ void test_vluxseg2ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_tum(
@@ -745,7 +745,7 @@ void test_vluxseg2ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_tum(
@@ -758,7 +758,7 @@ void test_vluxseg2ei16_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_tum(
@@ -771,7 +771,7 @@ void test_vluxseg2ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_tum(
@@ -784,7 +784,7 @@ void test_vluxseg2ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_tum(
@@ -797,7 +797,7 @@ void test_vluxseg2ei16_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_tum(
@@ -810,7 +810,7 @@ void test_vluxseg2ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_tum(
@@ -823,7 +823,7 @@ void test_vluxseg2ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_tum(
@@ -836,7 +836,7 @@ void test_vluxseg2ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_tum(
@@ -849,7 +849,7 @@ void test_vluxseg2ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_tum(
@@ -862,7 +862,7 @@ void test_vluxseg2ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_tum(
@@ -875,7 +875,7 @@ void test_vluxseg2ei16_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_tum(
@@ -888,7 +888,7 @@ void test_vluxseg2ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vluxseg2ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_tum(
@@ -914,7 +914,7 @@ void test_vluxseg2ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_tum(
@@ -927,7 +927,7 @@ void test_vluxseg2ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_tum(
@@ -940,7 +940,7 @@ void test_vluxseg2ei16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_tum(
@@ -953,7 +953,7 @@ void test_vluxseg2ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_tum(
@@ -966,7 +966,7 @@ void test_vluxseg2ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_tum(
@@ -979,7 +979,7 @@ void test_vluxseg2ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_tum(
@@ -992,7 +992,7 @@ void test_vluxseg2ei16_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_tum(
@@ -1005,7 +1005,7 @@ void test_vluxseg2ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_tum(
@@ -1018,7 +1018,7 @@ void test_vluxseg2ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_tum(
@@ -1031,7 +1031,7 @@ void test_vluxseg2ei16_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_tum(
@@ -1044,7 +1044,7 @@ void test_vluxseg2ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_tum(
@@ -1057,7 +1057,7 @@ void test_vluxseg2ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_tum(
@@ -1070,7 +1070,7 @@ void test_vluxseg2ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_tum(
@@ -1083,7 +1083,7 @@ void test_vluxseg2ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_tum(
@@ -1096,7 +1096,7 @@ void test_vluxseg2ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_tum(
@@ -1109,7 +1109,7 @@ void test_vluxseg2ei16_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_tum(
@@ -1122,7 +1122,7 @@ void test_vluxseg2ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_tum(
@@ -1135,7 +1135,7 @@ void test_vluxseg2ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_tum(
@@ -1148,7 +1148,7 @@ void test_vluxseg2ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_tum(
@@ -1161,7 +1161,7 @@ void test_vluxseg2ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_tum(
@@ -1174,7 +1174,7 @@ void test_vluxseg2ei16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_tum(
@@ -1187,7 +1187,7 @@ void test_vluxseg2ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_tum(
@@ -1200,7 +1200,7 @@ void test_vluxseg2ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_tum(
@@ -1213,7 +1213,7 @@ void test_vluxseg2ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_tum(
@@ -1226,7 +1226,7 @@ void test_vluxseg2ei16_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_tum(
@@ -1239,7 +1239,7 @@ void test_vluxseg2ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_tum(
@@ -1252,7 +1252,7 @@ void test_vluxseg2ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_tumu(
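//
// Editorial usage sketch: the masked policy overloads follow the same naming
// scheme, with the vbool mask passed ahead of the merge operands; the suffix
// encodes the policy (_tum: tail undisturbed; _tumu: tail and masked-off
// elements undisturbed, as tested below). The wrapper name is hypothetical;
// the call mirrors the tests, and the includes match the previous sketch.
//
//   void load_pair_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
//                       vint8mf8_t maskedoff0, vint8mf8_t maskedoff1,
//                       const int8_t *base, vuint16mf4_t bindex, size_t vl) {
//     // Index EEW is 16 while data EEW is 8 at LMUL=1/8, so the index
//     // vector type is vuint16mf4_t, matching the tests above.
//     __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1,
//                               base, bindex, vl);
//   }
//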
@@ -1265,7 +1265,7 @@ void test_vluxseg2ei16_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_tumu(
@@ -1278,7 +1278,7 @@ void test_vluxseg2ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_tumu(
@@ -1291,7 +1291,7 @@ void test_vluxseg2ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_tumu(
@@ -1304,7 +1304,7 @@ void test_vluxseg2ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_tumu(
@@ -1317,7 +1317,7 @@ void test_vluxseg2ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_tumu(
@@ -1330,7 +1330,7 @@ void test_vluxseg2ei16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_tumu(
@@ -1343,7 +1343,7 @@ void test_vluxseg2ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_tumu(
@@ -1356,7 +1356,7 @@ void test_vluxseg2ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg2ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_tumu(
@@ -1382,7 +1382,7 @@ void test_vluxseg2ei16_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_tumu(
@@ -1395,7 +1395,7 @@ void test_vluxseg2ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_tumu(
@@ -1408,7 +1408,7 @@ void test_vluxseg2ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_tumu(
@@ -1421,7 +1421,7 @@ void test_vluxseg2ei16_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_tumu(
@@ -1434,7 +1434,7 @@ void test_vluxseg2ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_tumu(
@@ -1447,7 +1447,7 @@ void test_vluxseg2ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_tumu(
@@ -1460,7 +1460,7 @@ void test_vluxseg2ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_tumu(
@@ -1473,7 +1473,7 @@ void test_vluxseg2ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_tumu(
@@ -1486,7 +1486,7 @@ void test_vluxseg2ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_tumu(
@@ -1499,7 +1499,7 @@ void test_vluxseg2ei16_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_tumu(
@@ -1512,7 +1512,7 @@ void test_vluxseg2ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_tumu(
@@ -1525,7 +1525,7 @@ void test_vluxseg2ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_tumu(
@@ -1538,7 +1538,7 @@ void test_vluxseg2ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_tumu(
@@ -1551,7 +1551,7 @@ void test_vluxseg2ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vluxseg2ei16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_tumu(
@@ -1577,7 +1577,7 @@ void test_vluxseg2ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_tumu(
@@ -1590,7 +1590,7 @@ void test_vluxseg2ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_tumu(
@@ -1603,7 +1603,7 @@ void test_vluxseg2ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_tumu(
@@ -1616,7 +1616,7 @@ void test_vluxseg2ei16_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_tumu(
@@ -1629,7 +1629,7 @@ void test_vluxseg2ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_tumu(
@@ -1642,7 +1642,7 @@ void test_vluxseg2ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_tumu(
@@ -1655,7 +1655,7 @@ void test_vluxseg2ei16_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_tumu(
@@ -1668,7 +1668,7 @@ void test_vluxseg2ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_tumu(
@@ -1681,7 +1681,7 @@ void test_vluxseg2ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_tumu(
@@ -1694,7 +1694,7 @@ void test_vluxseg2ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_tumu(
@@ -1707,7 +1707,7 @@ void test_vluxseg2ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_tumu(
@@ -1720,7 +1720,7 @@ void test_vluxseg2ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_tumu(
@@ -1733,7 +1733,7 @@ void test_vluxseg2ei16_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_tumu(
@@ -1746,7 +1746,7 @@ void test_vluxseg2ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_tumu(
@@ -1759,7 +1759,7 @@ void test_vluxseg2ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_tumu(
@@ -1772,7 +1772,7 @@ void test_vluxseg2ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_tumu(
@@ -1785,7 +1785,7 @@ void test_vluxseg2ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_tumu(
@@ -1798,7 +1798,7 @@ void test_vluxseg2ei16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_tumu(
@@ -1811,7 +1811,7 @@ void test_vluxseg2ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_tumu(
@@ -1824,7 +1824,7 @@ void test_vluxseg2ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_tumu(
@@ -1837,7 +1837,7 @@ void test_vluxseg2ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_tumu(
@@ -1850,7 +1850,7 @@ void test_vluxseg2ei16_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_tumu(
@@ -1863,7 +1863,7 @@ void test_vluxseg2ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_tumu(
@@ -1876,7 +1876,7 @@ void test_vluxseg2ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_mu(
@@ -1889,7 +1889,7 @@ void test_vluxseg2ei16_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_mu(
@@ -1902,7 +1902,7 @@ void test_vluxseg2ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vluxseg2ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_mu(
@@ -1928,7 +1928,7 @@ void test_vluxseg2ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_mu(
@@ -1941,7 +1941,7 @@ void test_vluxseg2ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vluxseg2ei16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_mu(
@@ -1967,7 +1967,7 @@ void test_vluxseg2ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_mu(
@@ -1980,7 +1980,7 @@ void test_vluxseg2ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_mu(
@@ -1993,7 +1993,7 @@ void test_vluxseg2ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_mu(
@@ -2006,7 +2006,7 @@ void test_vluxseg2ei16_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_mu(
@@ -2019,7 +2019,7 @@ void test_vluxseg2ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_mu(
@@ -2032,7 +2032,7 @@ void test_vluxseg2ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_mu(
@@ -2045,7 +2045,7 @@ void test_vluxseg2ei16_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_mu(
@@ -2058,7 +2058,7 @@ void test_vluxseg2ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_mu(
@@ -2071,7 +2071,7 @@ void test_vluxseg2ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_mu(
@@ -2084,7 +2084,7 @@ void test_vluxseg2ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_mu(
@@ -2097,7 +2097,7 @@ void test_vluxseg2ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_mu(
@@ -2110,7 +2110,7 @@ void test_vluxseg2ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_mu(
@@ -2123,7 +2123,7 @@ void test_vluxseg2ei16_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_mu(
@@ -2136,7 +2136,7 @@ void test_vluxseg2ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_mu(
@@ -2149,7 +2149,7 @@ void test_vluxseg2ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_mu(
@@ -2162,7 +2162,7 @@ void test_vluxseg2ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_mu(
@@ -2175,7 +2175,7 @@ void test_vluxseg2ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_mu(
@@ -2188,7 +2188,7 @@ void test_vluxseg2ei16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_mu(
@@ -2201,7 +2201,7 @@ void test_vluxseg2ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_mu(
@@ -2214,7 +2214,7 @@ void test_vluxseg2ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_mu(
@@ -2227,7 +2227,7 @@ void test_vluxseg2ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_mu(
@@ -2240,7 +2240,7 @@ void test_vluxseg2ei16_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_mu(
@@ -2253,7 +2253,7 @@ void test_vluxseg2ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_mu(
@@ -2266,7 +2266,7 @@ void test_vluxseg2ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_mu(
@@ -2279,7 +2279,7 @@ void test_vluxseg2ei16_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_mu(
@@ -2292,7 +2292,7 @@ void test_vluxseg2ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_mu(
@@ -2305,7 +2305,7 @@ void test_vluxseg2ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_mu(
@@ -2318,7 +2318,7 @@ void test_vluxseg2ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_mu(
@@ -2331,7 +2331,7 @@ void test_vluxseg2ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_mu(
@@ -2344,7 +2344,7 @@ void test_vluxseg2ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_mu(
@@ -2357,7 +2357,7 @@ void test_vluxseg2ei16_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_mu(
@@ -2370,7 +2370,7 @@ void test_vluxseg2ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_mu(
@@ -2383,7 +2383,7 @@ void test_vluxseg2ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_mu(
@@ -2396,7 +2396,7 @@ void test_vluxseg2ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_mu(
@@ -2409,7 +2409,7 @@ void test_vluxseg2ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_mu(
@@ -2422,7 +2422,7 @@ void test_vluxseg2ei16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_mu(
@@ -2435,7 +2435,7 @@ void test_vluxseg2ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_mu(
@@ -2448,7 +2448,7 @@ void test_vluxseg2ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_mu(
@@ -2461,7 +2461,7 @@ void test_vluxseg2ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_mu(
@@ -2474,7 +2474,7 @@ void test_vluxseg2ei16_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_mu(
@@ -2487,7 +2487,7 @@ void test_vluxseg2ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_mu(
@@ -2500,6 +2500,6 @@ void test_vluxseg2ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei16_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c
index 434a2ecf5d66..79fcee5bcb30 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vluxseg2ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vluxseg2ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vluxseg2ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_tu(
@@ -69,7 +69,7 @@ void test_vluxseg2ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_tu(
@@ -82,7 +82,7 @@ void test_vluxseg2ei32_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_tu(
@@ -95,7 +95,7 @@ void test_vluxseg2ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_tu(
@@ -108,7 +108,7 @@ void test_vluxseg2ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_tu(
@@ -121,7 +121,7 @@ void test_vluxseg2ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_tu(
@@ -134,7 +134,7 @@ void test_vluxseg2ei32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_tu(
@@ -147,7 +147,7 @@ void test_vluxseg2ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_tu(
@@ -160,7 +160,7 @@ void test_vluxseg2ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_tu(
@@ -173,7 +173,7 @@ void test_vluxseg2ei32_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_tu(
@@ -186,7 +186,7 @@ void test_vluxseg2ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_tu(
@@ -199,7 +199,7 @@ void test_vluxseg2ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_tu(
@@ -212,7 +212,7 @@ void test_vluxseg2ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_tu(
@@ -225,7 +225,7 @@ void test_vluxseg2ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_tu(
@@ -238,7 +238,7 @@ void test_vluxseg2ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_tu(
@@ -251,7 +251,7 @@ void test_vluxseg2ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_tu(
@@ -264,7 +264,7 @@ void test_vluxseg2ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_tu(
@@ -277,7 +277,7 @@ void test_vluxseg2ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_tu(
@@ -290,7 +290,7 @@ void test_vluxseg2ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tu(
@@ -303,7 +303,7 @@ void test_vluxseg2ei32_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_tu(
@@ -316,7 +316,7 @@ void test_vluxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_tu(
@@ -329,7 +329,7 @@ void test_vluxseg2ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_tu(
@@ -342,7 +342,7 @@ void test_vluxseg2ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_tu(
@@ -355,7 +355,7 @@ void test_vluxseg2ei32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_tu(
@@ -368,7 +368,7 @@ void test_vluxseg2ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_tu(
@@ -381,7 +381,7 @@ void test_vluxseg2ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_tu(
@@ -394,7 +394,7 @@ void test_vluxseg2ei32_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_tu(
@@ -407,7 +407,7 @@ void test_vluxseg2ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_tu(
@@ -420,7 +420,7 @@ void test_vluxseg2ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_tu(
@@ -433,7 +433,7 @@ void test_vluxseg2ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_tu(
@@ -446,7 +446,7 @@ void test_vluxseg2ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_tu(
@@ -459,7 +459,7 @@ void test_vluxseg2ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_tu(
@@ -472,7 +472,7 @@ void test_vluxseg2ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_tu(
@@ -485,7 +485,7 @@ void test_vluxseg2ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_tu(
@@ -498,7 +498,7 @@ void test_vluxseg2ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_tu(
@@ -511,7 +511,7 @@ void test_vluxseg2ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_tu(
@@ -524,7 +524,7 @@ void test_vluxseg2ei32_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_tu(
@@ -537,7 +537,7 @@ void test_vluxseg2ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_tu(
@@ -550,7 +550,7 @@ void test_vluxseg2ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_tu(
@@ -563,7 +563,7 @@ void test_vluxseg2ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_tu(
@@ -576,7 +576,7 @@ void test_vluxseg2ei32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_tu(
@@ -589,7 +589,7 @@ void test_vluxseg2ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_tu(
@@ -602,7 +602,7 @@ void test_vluxseg2ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_tum(
@@ -615,7 +615,7 @@ void test_vluxseg2ei32_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_tum(
@@ -628,7 +628,7 @@ void test_vluxseg2ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_tum(
@@ -641,7 +641,7 @@ void test_vluxseg2ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_tum(
@@ -654,7 +654,7 @@ void test_vluxseg2ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_tum(
@@ -667,7 +667,7 @@ void test_vluxseg2ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_tum(
@@ -680,7 +680,7 @@ void test_vluxseg2ei32_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_tum(
@@ -693,7 +693,7 @@ void test_vluxseg2ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_tum(
@@ -706,7 +706,7 @@ void test_vluxseg2ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_tum(
@@ -719,7 +719,7 @@ void test_vluxseg2ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_tum(
@@ -732,7 +732,7 @@ void test_vluxseg2ei32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_tum(
@@ -745,7 +745,7 @@ void test_vluxseg2ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_tum(
@@ -758,7 +758,7 @@ void test_vluxseg2ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_tum(
@@ -771,7 +771,7 @@ void test_vluxseg2ei32_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_tum(
@@ -784,7 +784,7 @@ void test_vluxseg2ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_tum(
@@ -797,7 +797,7 @@ void test_vluxseg2ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_tum(
@@ -810,7 +810,7 @@ void test_vluxseg2ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_tum(
@@ -823,7 +823,7 @@ void test_vluxseg2ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_tum(
@@ -836,7 +836,7 @@ void test_vluxseg2ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_tum(
@@ -849,7 +849,7 @@ void test_vluxseg2ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_tum(
@@ -862,7 +862,7 @@ void test_vluxseg2ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_tum(
@@ -875,7 +875,7 @@ void test_vluxseg2ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_tum(
@@ -888,7 +888,7 @@ void test_vluxseg2ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tum(
@@ -901,7 +901,7 @@ void test_vluxseg2ei32_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_tum(
@@ -914,7 +914,7 @@ void test_vluxseg2ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_tum(
@@ -927,7 +927,7 @@ void test_vluxseg2ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_tum(
@@ -940,7 +940,7 @@ void test_vluxseg2ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_tum(
@@ -953,7 +953,7 @@ void test_vluxseg2ei32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_tum(
@@ -966,7 +966,7 @@ void test_vluxseg2ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_tum(
@@ -979,7 +979,7 @@ void test_vluxseg2ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_tum(
@@ -992,7 +992,7 @@ void test_vluxseg2ei32_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_tum(
@@ -1005,7 +1005,7 @@ void test_vluxseg2ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_tum(
@@ -1018,7 +1018,7 @@ void test_vluxseg2ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_tum(
@@ -1031,7 +1031,7 @@ void test_vluxseg2ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_tum(
@@ -1044,7 +1044,7 @@ void test_vluxseg2ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_tum(
@@ -1057,7 +1057,7 @@ void test_vluxseg2ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_tum(
@@ -1070,7 +1070,7 @@ void test_vluxseg2ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_tum(
@@ -1083,7 +1083,7 @@ void test_vluxseg2ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_tum(
@@ -1096,7 +1096,7 @@ void test_vluxseg2ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_tum(
@@ -1109,7 +1109,7 @@ void test_vluxseg2ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_tum(
@@ -1122,7 +1122,7 @@ void test_vluxseg2ei32_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_tum(
@@ -1135,7 +1135,7 @@ void test_vluxseg2ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_tum(
@@ -1148,7 +1148,7 @@ void test_vluxseg2ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_tum(
@@ -1161,7 +1161,7 @@ void test_vluxseg2ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_tum(
@@ -1174,7 +1174,7 @@ void test_vluxseg2ei32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_tum(
@@ -1187,7 +1187,7 @@ void test_vluxseg2ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_tum(
@@ -1200,7 +1200,7 @@ void test_vluxseg2ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_tumu(
@@ -1213,7 +1213,7 @@ void test_vluxseg2ei32_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_tumu(
@@ -1226,7 +1226,7 @@ void test_vluxseg2ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vluxseg2ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_tumu(
@@ -1252,7 +1252,7 @@ void test_vluxseg2ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_tumu(
@@ -1265,7 +1265,7 @@ void test_vluxseg2ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_tumu(
@@ -1278,7 +1278,7 @@ void test_vluxseg2ei32_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_tumu(
@@ -1291,7 +1291,7 @@ void test_vluxseg2ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_tumu(
@@ -1304,7 +1304,7 @@ void test_vluxseg2ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_tumu(
@@ -1317,7 +1317,7 @@ void test_vluxseg2ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_tumu(
@@ -1330,7 +1330,7 @@ void test_vluxseg2ei32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_tumu(
@@ -1343,7 +1343,7 @@ void test_vluxseg2ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_tumu(
@@ -1356,7 +1356,7 @@ void test_vluxseg2ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg2ei32_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_tumu(
@@ -1382,7 +1382,7 @@ void test_vluxseg2ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_tumu(
@@ -1395,7 +1395,7 @@ void test_vluxseg2ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_tumu(
@@ -1408,7 +1408,7 @@ void test_vluxseg2ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_tumu(
@@ -1421,7 +1421,7 @@ void test_vluxseg2ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_tumu(
@@ -1434,7 +1434,7 @@ void test_vluxseg2ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_tumu(
@@ -1447,7 +1447,7 @@ void test_vluxseg2ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_tumu(
@@ -1460,7 +1460,7 @@ void test_vluxseg2ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_tumu(
@@ -1473,7 +1473,7 @@ void test_vluxseg2ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_tumu(
@@ -1486,7 +1486,7 @@ void test_vluxseg2ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tumu(
@@ -1499,7 +1499,7 @@ void test_vluxseg2ei32_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_tumu(
@@ -1512,7 +1512,7 @@ void test_vluxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_tumu(
@@ -1525,7 +1525,7 @@ void test_vluxseg2ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_tumu(
@@ -1538,7 +1538,7 @@ void test_vluxseg2ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vluxseg2ei32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_tumu(
@@ -1564,7 +1564,7 @@ void test_vluxseg2ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_tumu(
@@ -1577,7 +1577,7 @@ void test_vluxseg2ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_tumu(
@@ -1590,7 +1590,7 @@ void test_vluxseg2ei32_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_tumu(
@@ -1603,7 +1603,7 @@ void test_vluxseg2ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_tumu(
@@ -1616,7 +1616,7 @@ void test_vluxseg2ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vluxseg2ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_tumu(
@@ -1642,7 +1642,7 @@ void test_vluxseg2ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_tumu(
@@ -1655,7 +1655,7 @@ void test_vluxseg2ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_tumu(
@@ -1668,7 +1668,7 @@ void test_vluxseg2ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_tumu(
@@ -1681,7 +1681,7 @@ void test_vluxseg2ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_tumu(
@@ -1694,7 +1694,7 @@ void test_vluxseg2ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_tumu(
@@ -1707,7 +1707,7 @@ void test_vluxseg2ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_tumu(
@@ -1720,7 +1720,7 @@ void test_vluxseg2ei32_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_tumu(
@@ -1733,7 +1733,7 @@ void test_vluxseg2ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_tumu(
@@ -1746,7 +1746,7 @@ void test_vluxseg2ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_tumu(
@@ -1759,7 +1759,7 @@ void test_vluxseg2ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_tumu(
@@ -1772,7 +1772,7 @@ void test_vluxseg2ei32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_tumu(
@@ -1785,7 +1785,7 @@ void test_vluxseg2ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_tumu(
@@ -1798,7 +1798,7 @@ void test_vluxseg2ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_mu(
@@ -1811,7 +1811,7 @@ void test_vluxseg2ei32_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_mu(
@@ -1824,7 +1824,7 @@ void test_vluxseg2ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_mu(
@@ -1837,7 +1837,7 @@ void test_vluxseg2ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_mu(
@@ -1850,7 +1850,7 @@ void test_vluxseg2ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_mu(
@@ -1863,7 +1863,7 @@ void test_vluxseg2ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_mu(
@@ -1876,7 +1876,7 @@ void test_vluxseg2ei32_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_mu(
@@ -1889,7 +1889,7 @@ void test_vluxseg2ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_mu(
@@ -1902,7 +1902,7 @@ void test_vluxseg2ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_mu(
@@ -1915,7 +1915,7 @@ void test_vluxseg2ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_mu(
@@ -1928,7 +1928,7 @@ void test_vluxseg2ei32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_mu(
@@ -1941,7 +1941,7 @@ void test_vluxseg2ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_mu(
@@ -1954,7 +1954,7 @@ void test_vluxseg2ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_mu(
@@ -1967,7 +1967,7 @@ void test_vluxseg2ei32_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_mu(
@@ -1980,7 +1980,7 @@ void test_vluxseg2ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_mu(
@@ -1993,7 +1993,7 @@ void test_vluxseg2ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_mu(
@@ -2006,7 +2006,7 @@ void test_vluxseg2ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_mu(
@@ -2019,7 +2019,7 @@ void test_vluxseg2ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_mu(
@@ -2032,7 +2032,7 @@ void test_vluxseg2ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_mu(
@@ -2045,7 +2045,7 @@ void test_vluxseg2ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_mu(
@@ -2058,7 +2058,7 @@ void test_vluxseg2ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_mu(
@@ -2071,7 +2071,7 @@ void test_vluxseg2ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_mu(
@@ -2084,7 +2084,7 @@ void test_vluxseg2ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_mu(
@@ -2097,7 +2097,7 @@ void test_vluxseg2ei32_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_mu(
@@ -2110,7 +2110,7 @@ void test_vluxseg2ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_mu(
@@ -2123,7 +2123,7 @@ void test_vluxseg2ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_mu(
@@ -2136,7 +2136,7 @@ void test_vluxseg2ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_mu(
@@ -2149,7 +2149,7 @@ void test_vluxseg2ei32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_mu(
@@ -2162,7 +2162,7 @@ void test_vluxseg2ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_mu(
@@ -2175,7 +2175,7 @@ void test_vluxseg2ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_mu(
@@ -2188,7 +2188,7 @@ void test_vluxseg2ei32_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_mu(
@@ -2201,7 +2201,7 @@ void test_vluxseg2ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vluxseg2ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_mu(
@@ -2227,7 +2227,7 @@ void test_vluxseg2ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_mu(
@@ -2240,7 +2240,7 @@ void test_vluxseg2ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_mu(
@@ -2253,7 +2253,7 @@ void test_vluxseg2ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_mu(
@@ -2266,7 +2266,7 @@ void test_vluxseg2ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vluxseg2ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_mu(
@@ -2292,7 +2292,7 @@ void test_vluxseg2ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_mu(
@@ -2305,7 +2305,7 @@ void test_vluxseg2ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_mu(
@@ -2318,7 +2318,7 @@ void test_vluxseg2ei32_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_mu(
@@ -2331,7 +2331,7 @@ void test_vluxseg2ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_mu(
@@ -2344,7 +2344,7 @@ void test_vluxseg2ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_mu(
@@ -2357,7 +2357,7 @@ void test_vluxseg2ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_mu(
@@ -2370,7 +2370,7 @@ void test_vluxseg2ei32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_mu(
@@ -2383,7 +2383,7 @@ void test_vluxseg2ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_mu(
@@ -2396,6 +2396,6 @@ void test_vluxseg2ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei32_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c
index 3a48c97be8dd..802cc0c9f8ff 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vluxseg2ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vluxseg2ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vluxseg2ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_tu(
@@ -69,7 +69,7 @@ void test_vluxseg2ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_tu(
@@ -82,7 +82,7 @@ void test_vluxseg2ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_tu(
@@ -95,7 +95,7 @@ void test_vluxseg2ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_tu(
@@ -108,7 +108,7 @@ void test_vluxseg2ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_tu(
@@ -121,7 +121,7 @@ void test_vluxseg2ei64_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_tu(
@@ -134,7 +134,7 @@ void test_vluxseg2ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_tu(
@@ -147,7 +147,7 @@ void test_vluxseg2ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_tu(
@@ -160,7 +160,7 @@ void test_vluxseg2ei64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_tu(
@@ -173,7 +173,7 @@ void test_vluxseg2ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_tu(
@@ -186,7 +186,7 @@ void test_vluxseg2ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vluxseg2ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_tu(
@@ -212,7 +212,7 @@ void test_vluxseg2ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_tu(
@@ -225,7 +225,7 @@ void test_vluxseg2ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_tu(
@@ -238,7 +238,7 @@ void test_vluxseg2ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_tu(
@@ -251,7 +251,7 @@ void test_vluxseg2ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_tu(
@@ -264,7 +264,7 @@ void test_vluxseg2ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_tu(
@@ -277,7 +277,7 @@ void test_vluxseg2ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_tu(
@@ -290,7 +290,7 @@ void test_vluxseg2ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_tu(
@@ -303,7 +303,7 @@ void test_vluxseg2ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_tu(
@@ -316,7 +316,7 @@ void test_vluxseg2ei64_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_tu(
@@ -329,7 +329,7 @@ void test_vluxseg2ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_tu(
@@ -342,7 +342,7 @@ void test_vluxseg2ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_tu(
@@ -355,7 +355,7 @@ void test_vluxseg2ei64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_tu(
@@ -368,7 +368,7 @@ void test_vluxseg2ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_tu(
@@ -381,7 +381,7 @@ void test_vluxseg2ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_tu(
@@ -394,7 +394,7 @@ void test_vluxseg2ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_tu(
@@ -407,7 +407,7 @@ void test_vluxseg2ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_tu(
@@ -420,7 +420,7 @@ void test_vluxseg2ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_tu(
@@ -433,7 +433,7 @@ void test_vluxseg2ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_tu(
@@ -446,7 +446,7 @@ void test_vluxseg2ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_tu(
@@ -459,7 +459,7 @@ void test_vluxseg2ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_tu(
@@ -472,7 +472,7 @@ void test_vluxseg2ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_tu(
@@ -485,7 +485,7 @@ void test_vluxseg2ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_tu(
@@ -498,7 +498,7 @@ void test_vluxseg2ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_tu(
@@ -511,7 +511,7 @@ void test_vluxseg2ei64_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_tu(
@@ -524,7 +524,7 @@ void test_vluxseg2ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_tu(
@@ -537,7 +537,7 @@ void test_vluxseg2ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_tum(
@@ -550,7 +550,7 @@ void test_vluxseg2ei64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_tum(
@@ -563,7 +563,7 @@ void test_vluxseg2ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_tum(
@@ -576,7 +576,7 @@ void test_vluxseg2ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_tum(
@@ -589,7 +589,7 @@ void test_vluxseg2ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_tum(
@@ -602,7 +602,7 @@ void test_vluxseg2ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_tum(
@@ -615,7 +615,7 @@ void test_vluxseg2ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_tum(
@@ -628,7 +628,7 @@ void test_vluxseg2ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_tum(
@@ -641,7 +641,7 @@ void test_vluxseg2ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_tum(
@@ -654,7 +654,7 @@ void test_vluxseg2ei64_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_tum(
@@ -667,7 +667,7 @@ void test_vluxseg2ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_tum(
@@ -680,7 +680,7 @@ void test_vluxseg2ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_tum(
@@ -693,7 +693,7 @@ void test_vluxseg2ei64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_tum(
@@ -706,7 +706,7 @@ void test_vluxseg2ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_tum(
@@ -719,7 +719,7 @@ void test_vluxseg2ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_tum(
@@ -732,7 +732,7 @@ void test_vluxseg2ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_tum(
@@ -745,7 +745,7 @@ void test_vluxseg2ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_tum(
@@ -758,7 +758,7 @@ void test_vluxseg2ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_tum(
@@ -771,7 +771,7 @@ void test_vluxseg2ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_tum(
@@ -784,7 +784,7 @@ void test_vluxseg2ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_tum(
@@ -797,7 +797,7 @@ void test_vluxseg2ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_tum(
@@ -810,7 +810,7 @@ void test_vluxseg2ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_tum(
@@ -823,7 +823,7 @@ void test_vluxseg2ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_tum(
@@ -836,7 +836,7 @@ void test_vluxseg2ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_tum(
@@ -849,7 +849,7 @@ void test_vluxseg2ei64_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_tum(
@@ -862,7 +862,7 @@ void test_vluxseg2ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_tum(
@@ -875,7 +875,7 @@ void test_vluxseg2ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_tum(
@@ -888,7 +888,7 @@ void test_vluxseg2ei64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_tum(
@@ -901,7 +901,7 @@ void test_vluxseg2ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_tum(
@@ -914,7 +914,7 @@ void test_vluxseg2ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_tum(
@@ -927,7 +927,7 @@ void test_vluxseg2ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_tum(
@@ -940,7 +940,7 @@ void test_vluxseg2ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_tum(
@@ -953,7 +953,7 @@ void test_vluxseg2ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_tum(
@@ -966,7 +966,7 @@ void test_vluxseg2ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_tum(
@@ -979,7 +979,7 @@ void test_vluxseg2ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_tum(
@@ -992,7 +992,7 @@ void test_vluxseg2ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_tum(
@@ -1005,7 +1005,7 @@ void test_vluxseg2ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_tum(
@@ -1018,7 +1018,7 @@ void test_vluxseg2ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_tum(
@@ -1031,7 +1031,7 @@ void test_vluxseg2ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_tum(
@@ -1044,7 +1044,7 @@ void test_vluxseg2ei64_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_tum(
@@ -1057,7 +1057,7 @@ void test_vluxseg2ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_tum(
@@ -1070,7 +1070,7 @@ void test_vluxseg2ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_tumu(
@@ -1083,7 +1083,7 @@ void test_vluxseg2ei64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_tumu(
@@ -1096,7 +1096,7 @@ void test_vluxseg2ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_tumu(
@@ -1109,7 +1109,7 @@ void test_vluxseg2ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_tumu(
@@ -1122,7 +1122,7 @@ void test_vluxseg2ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_tumu(
@@ -1135,7 +1135,7 @@ void test_vluxseg2ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_tumu(
@@ -1148,7 +1148,7 @@ void test_vluxseg2ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_tumu(
@@ -1161,7 +1161,7 @@ void test_vluxseg2ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_tumu(
@@ -1174,7 +1174,7 @@ void test_vluxseg2ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_tumu(
@@ -1187,7 +1187,7 @@ void test_vluxseg2ei64_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_tumu(
@@ -1200,7 +1200,7 @@ void test_vluxseg2ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_tumu(
@@ -1213,7 +1213,7 @@ void test_vluxseg2ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_tumu(
@@ -1226,7 +1226,7 @@ void test_vluxseg2ei64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_tumu(
@@ -1239,7 +1239,7 @@ void test_vluxseg2ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_tumu(
@@ -1252,7 +1252,7 @@ void test_vluxseg2ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_tumu(
@@ -1265,7 +1265,7 @@ void test_vluxseg2ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_tumu(
@@ -1278,7 +1278,7 @@ void test_vluxseg2ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_tumu(
@@ -1291,7 +1291,7 @@ void test_vluxseg2ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_tumu(
@@ -1304,7 +1304,7 @@ void test_vluxseg2ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_tumu(
@@ -1317,7 +1317,7 @@ void test_vluxseg2ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_tumu(
@@ -1330,7 +1330,7 @@ void test_vluxseg2ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_tumu(
@@ -1343,7 +1343,7 @@ void test_vluxseg2ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_tumu(
@@ -1356,7 +1356,7 @@ void test_vluxseg2ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg2ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_tumu(
@@ -1382,7 +1382,7 @@ void test_vluxseg2ei64_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_tumu(
@@ -1395,7 +1395,7 @@ void test_vluxseg2ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_tumu(
@@ -1408,7 +1408,7 @@ void test_vluxseg2ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_tumu(
@@ -1421,7 +1421,7 @@ void test_vluxseg2ei64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_tumu(
@@ -1434,7 +1434,7 @@ void test_vluxseg2ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_tumu(
@@ -1447,7 +1447,7 @@ void test_vluxseg2ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_tumu(
@@ -1460,7 +1460,7 @@ void test_vluxseg2ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_tumu(
@@ -1473,7 +1473,7 @@ void test_vluxseg2ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_tumu(
@@ -1486,7 +1486,7 @@ void test_vluxseg2ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vluxseg2ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_tumu(
@@ -1512,7 +1512,7 @@ void test_vluxseg2ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_tumu(
@@ -1525,7 +1525,7 @@ void test_vluxseg2ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_tumu(
@@ -1538,7 +1538,7 @@ void test_vluxseg2ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_tumu(
@@ -1551,7 +1551,7 @@ void test_vluxseg2ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_tumu(
@@ -1564,7 +1564,7 @@ void test_vluxseg2ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_tumu(
@@ -1577,7 +1577,7 @@ void test_vluxseg2ei64_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_tumu(
@@ -1590,7 +1590,7 @@ void test_vluxseg2ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_tumu(
@@ -1603,7 +1603,7 @@ void test_vluxseg2ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_mu(
@@ -1616,7 +1616,7 @@ void test_vluxseg2ei64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_mu(
@@ -1629,7 +1629,7 @@ void test_vluxseg2ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_mu(
@@ -1642,7 +1642,7 @@ void test_vluxseg2ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_mu(
@@ -1655,7 +1655,7 @@ void test_vluxseg2ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_mu(
@@ -1668,7 +1668,7 @@ void test_vluxseg2ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_mu(
@@ -1681,7 +1681,7 @@ void test_vluxseg2ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_mu(
@@ -1694,7 +1694,7 @@ void test_vluxseg2ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_mu(
@@ -1707,7 +1707,7 @@ void test_vluxseg2ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_mu(
@@ -1720,7 +1720,7 @@ void test_vluxseg2ei64_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_mu(
@@ -1733,7 +1733,7 @@ void test_vluxseg2ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_mu(
@@ -1746,7 +1746,7 @@ void test_vluxseg2ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_mu(
@@ -1759,7 +1759,7 @@ void test_vluxseg2ei64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_mu(
@@ -1772,7 +1772,7 @@ void test_vluxseg2ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_mu(
@@ -1785,7 +1785,7 @@ void test_vluxseg2ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_mu(
@@ -1798,7 +1798,7 @@ void test_vluxseg2ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_mu(
@@ -1811,7 +1811,7 @@ void test_vluxseg2ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_mu(
@@ -1824,7 +1824,7 @@ void test_vluxseg2ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_mu(
@@ -1837,7 +1837,7 @@ void test_vluxseg2ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_mu(
@@ -1850,7 +1850,7 @@ void test_vluxseg2ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_mu(
@@ -1863,7 +1863,7 @@ void test_vluxseg2ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_mu(
@@ -1876,7 +1876,7 @@ void test_vluxseg2ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_mu(
@@ -1889,7 +1889,7 @@ void test_vluxseg2ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_mu(
@@ -1902,7 +1902,7 @@ void test_vluxseg2ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_mu(
@@ -1915,7 +1915,7 @@ void test_vluxseg2ei64_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_mu(
@@ -1928,7 +1928,7 @@ void test_vluxseg2ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_mu(
@@ -1941,7 +1941,7 @@ void test_vluxseg2ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_mu(
@@ -1954,7 +1954,7 @@ void test_vluxseg2ei64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_mu(
@@ -1967,7 +1967,7 @@ void test_vluxseg2ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_mu(
@@ -1980,7 +1980,7 @@ void test_vluxseg2ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_mu(
@@ -1993,7 +1993,7 @@ void test_vluxseg2ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_mu(
@@ -2006,7 +2006,7 @@ void test_vluxseg2ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_mu(
@@ -2019,7 +2019,7 @@ void test_vluxseg2ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_mu(
@@ -2032,7 +2032,7 @@ void test_vluxseg2ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_mu(
@@ -2045,7 +2045,7 @@ void test_vluxseg2ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_mu(
@@ -2058,7 +2058,7 @@ void test_vluxseg2ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_mu(
@@ -2071,7 +2071,7 @@ void test_vluxseg2ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_mu(
@@ -2084,7 +2084,7 @@ void test_vluxseg2ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_mu(
@@ -2097,7 +2097,7 @@ void test_vluxseg2ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_mu(
@@ -2110,7 +2110,7 @@ void test_vluxseg2ei64_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_mu(
@@ -2123,7 +2123,7 @@ void test_vluxseg2ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_mu(
@@ -2136,6 +2136,6 @@ void test_vluxseg2ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c
index 5438dad9081e..ac4e375d11b7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c
@@ -17,7 +17,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_tu(
@@ -30,7 +30,7 @@ void test_vluxseg2ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_tu(
@@ -43,7 +43,7 @@ void test_vluxseg2ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_tu(
@@ -56,7 +56,7 @@ void test_vluxseg2ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_tu(
@@ -69,7 +69,7 @@ void test_vluxseg2ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_tu(
@@ -82,7 +82,7 @@ void test_vluxseg2ei8_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_tu(
@@ -95,7 +95,7 @@ void test_vluxseg2ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_tu(
@@ -108,7 +108,7 @@ void test_vluxseg2ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_tu(
@@ -121,7 +121,7 @@ void test_vluxseg2ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_tu(
@@ -134,7 +134,7 @@ void test_vluxseg2ei8_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_tu(
@@ -147,7 +147,7 @@ void test_vluxseg2ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_tu(
@@ -160,7 +160,7 @@ void test_vluxseg2ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_tu(
@@ -173,7 +173,7 @@ void test_vluxseg2ei8_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_tu(
@@ -186,7 +186,7 @@ void test_vluxseg2ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_tu(
@@ -199,7 +199,7 @@ void test_vluxseg2ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_tu(
@@ -212,7 +212,7 @@ void test_vluxseg2ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_tu(
@@ -225,7 +225,7 @@ void test_vluxseg2ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedof
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_tu(
@@ -238,7 +238,7 @@ void test_vluxseg2ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedof
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_tu(
@@ -251,7 +251,7 @@ void test_vluxseg2ei8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedof
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_tu(
@@ -264,7 +264,7 @@ void test_vluxseg2ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vluxseg2ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_tu(
@@ -290,7 +290,7 @@ void test_vluxseg2ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_tu(
@@ -303,7 +303,7 @@ void test_vluxseg2ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_tu(
@@ -316,7 +316,7 @@ void test_vluxseg2ei8_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_tu(
@@ -329,7 +329,7 @@ void test_vluxseg2ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_tu(
@@ -342,7 +342,7 @@ void test_vluxseg2ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_tu(
@@ -355,7 +355,7 @@ void test_vluxseg2ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_tu(
@@ -368,7 +368,7 @@ void test_vluxseg2ei8_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_tu(
@@ -381,7 +381,7 @@ void test_vluxseg2ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_tu(
@@ -394,7 +394,7 @@ void test_vluxseg2ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_tu(
@@ -407,7 +407,7 @@ void test_vluxseg2ei8_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_tu(
@@ -420,7 +420,7 @@ void test_vluxseg2ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_tu(
@@ -433,7 +433,7 @@ void test_vluxseg2ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_tu(
@@ -446,7 +446,7 @@ void test_vluxseg2ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_tu(
@@ -459,7 +459,7 @@ void test_vluxseg2ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_tu(
@@ -472,7 +472,7 @@ void test_vluxseg2ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_tu(
@@ -485,7 +485,7 @@ void test_vluxseg2ei8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maske
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_tu(
@@ -498,7 +498,7 @@ void test_vluxseg2ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_tu(
@@ -511,7 +511,7 @@ void test_vluxseg2ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_tu(
@@ -524,7 +524,7 @@ void test_vluxseg2ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_tu(
@@ -537,7 +537,7 @@ void test_vluxseg2ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_tu(
@@ -550,7 +550,7 @@ void test_vluxseg2ei8_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_tu(
@@ -563,7 +563,7 @@ void test_vluxseg2ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_tu(
@@ -576,7 +576,7 @@ void test_vluxseg2ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_tu(
@@ -589,7 +589,7 @@ void test_vluxseg2ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vluxseg2ei8_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_tu(
@@ -615,7 +615,7 @@ void test_vluxseg2ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_tu(
@@ -628,7 +628,7 @@ void test_vluxseg2ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_tum(
@@ -641,7 +641,7 @@ void test_vluxseg2ei8_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_tum(
@@ -654,7 +654,7 @@ void test_vluxseg2ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_tum(
@@ -667,7 +667,7 @@ void test_vluxseg2ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_tum(
@@ -680,7 +680,7 @@ void test_vluxseg2ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_tum(
@@ -693,7 +693,7 @@ void test_vluxseg2ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_tum(
@@ -706,7 +706,7 @@ void test_vluxseg2ei8_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_tum(
@@ -719,7 +719,7 @@ void test_vluxseg2ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_tum(
@@ -732,7 +732,7 @@ void test_vluxseg2ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_tum(
@@ -745,7 +745,7 @@ void test_vluxseg2ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_tum(
@@ -758,7 +758,7 @@ void test_vluxseg2ei8_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_tum(
@@ -771,7 +771,7 @@ void test_vluxseg2ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_tum(
@@ -784,7 +784,7 @@ void test_vluxseg2ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_tum(
@@ -797,7 +797,7 @@ void test_vluxseg2ei8_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_tum(
@@ -810,7 +810,7 @@ void test_vluxseg2ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_tum(
@@ -823,7 +823,7 @@ void test_vluxseg2ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_tum(
@@ -836,7 +836,7 @@ void test_vluxseg2ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_tum(
@@ -849,7 +849,7 @@ void test_vluxseg2ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_tum(
@@ -862,7 +862,7 @@ void test_vluxseg2ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_tum(
@@ -875,7 +875,7 @@ void test_vluxseg2ei8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_tum(
@@ -888,7 +888,7 @@ void test_vluxseg2ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vluxseg2ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_tum(
@@ -914,7 +914,7 @@ void test_vluxseg2ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_tum(
@@ -927,7 +927,7 @@ void test_vluxseg2ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_tum(
@@ -940,7 +940,7 @@ void test_vluxseg2ei8_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_tum(
@@ -953,7 +953,7 @@ void test_vluxseg2ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_tum(
@@ -966,7 +966,7 @@ void test_vluxseg2ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_tum(
@@ -979,7 +979,7 @@ void test_vluxseg2ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_tum(
@@ -992,7 +992,7 @@ void test_vluxseg2ei8_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_tum(
@@ -1005,7 +1005,7 @@ void test_vluxseg2ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_tum(
@@ -1018,7 +1018,7 @@ void test_vluxseg2ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_tum(
@@ -1031,7 +1031,7 @@ void test_vluxseg2ei8_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_tum(
@@ -1044,7 +1044,7 @@ void test_vluxseg2ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_tum(
@@ -1057,7 +1057,7 @@ void test_vluxseg2ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_tum(
@@ -1070,7 +1070,7 @@ void test_vluxseg2ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_tum(
@@ -1083,7 +1083,7 @@ void test_vluxseg2ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_tum(
@@ -1096,7 +1096,7 @@ void test_vluxseg2ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_tum(
@@ -1109,7 +1109,7 @@ void test_vluxseg2ei8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_tum(
@@ -1122,7 +1122,7 @@ void test_vluxseg2ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_tum(
@@ -1135,7 +1135,7 @@ void test_vluxseg2ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_tum(
@@ -1148,7 +1148,7 @@ void test_vluxseg2ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_tum(
@@ -1161,7 +1161,7 @@ void test_vluxseg2ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_tum(
@@ -1174,7 +1174,7 @@ void test_vluxseg2ei8_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_tum(
@@ -1187,7 +1187,7 @@ void test_vluxseg2ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_tum(
@@ -1200,7 +1200,7 @@ void test_vluxseg2ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_tum(
@@ -1213,7 +1213,7 @@ void test_vluxseg2ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_tum(
@@ -1226,7 +1226,7 @@ void test_vluxseg2ei8_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_tum(
@@ -1239,7 +1239,7 @@ void test_vluxseg2ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_tum(
@@ -1252,7 +1252,7 @@ void test_vluxseg2ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_tumu(
@@ -1265,7 +1265,7 @@ void test_vluxseg2ei8_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_tumu(
@@ -1278,7 +1278,7 @@ void test_vluxseg2ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_tumu(
@@ -1291,7 +1291,7 @@ void test_vluxseg2ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_tumu(
@@ -1304,7 +1304,7 @@ void test_vluxseg2ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_tumu(
@@ -1317,7 +1317,7 @@ void test_vluxseg2ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_tumu(
@@ -1330,7 +1330,7 @@ void test_vluxseg2ei8_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_tumu(
@@ -1343,7 +1343,7 @@ void test_vluxseg2ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_tumu(
@@ -1356,7 +1356,7 @@ void test_vluxseg2ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg2ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_tumu(
@@ -1382,7 +1382,7 @@ void test_vluxseg2ei8_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_tumu(
@@ -1395,7 +1395,7 @@ void test_vluxseg2ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_tumu(
@@ -1408,7 +1408,7 @@ void test_vluxseg2ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_tumu(
@@ -1421,7 +1421,7 @@ void test_vluxseg2ei8_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_tumu(
@@ -1434,7 +1434,7 @@ void test_vluxseg2ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_tumu(
@@ -1447,7 +1447,7 @@ void test_vluxseg2ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_tumu(
@@ -1460,7 +1460,7 @@ void test_vluxseg2ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_tumu(
@@ -1473,7 +1473,7 @@ void test_vluxseg2ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_tumu(
@@ -1486,7 +1486,7 @@ void test_vluxseg2ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_tumu(
@@ -1499,7 +1499,7 @@ void test_vluxseg2ei8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_tumu(
@@ -1512,7 +1512,7 @@ void test_vluxseg2ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_tumu(
@@ -1525,7 +1525,7 @@ void test_vluxseg2ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_tumu(
@@ -1538,7 +1538,7 @@ void test_vluxseg2ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_tumu(
@@ -1551,7 +1551,7 @@ void test_vluxseg2ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vluxseg2ei8_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_tumu(
@@ -1577,7 +1577,7 @@ void test_vluxseg2ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_tumu(
@@ -1590,7 +1590,7 @@ void test_vluxseg2ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_tumu(
@@ -1603,7 +1603,7 @@ void test_vluxseg2ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_tumu(
@@ -1616,7 +1616,7 @@ void test_vluxseg2ei8_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_tumu(
@@ -1629,7 +1629,7 @@ void test_vluxseg2ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_tumu(
@@ -1642,7 +1642,7 @@ void test_vluxseg2ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_tumu(
@@ -1655,7 +1655,7 @@ void test_vluxseg2ei8_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_tumu(
@@ -1668,7 +1668,7 @@ void test_vluxseg2ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_tumu(
@@ -1681,7 +1681,7 @@ void test_vluxseg2ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_tumu(
@@ -1694,7 +1694,7 @@ void test_vluxseg2ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_tumu(
@@ -1707,7 +1707,7 @@ void test_vluxseg2ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_tumu(
@@ -1720,7 +1720,7 @@ void test_vluxseg2ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_tumu(
@@ -1733,7 +1733,7 @@ void test_vluxseg2ei8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_tumu(
@@ -1746,7 +1746,7 @@ void test_vluxseg2ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_tumu(
@@ -1759,7 +1759,7 @@ void test_vluxseg2ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_tumu(
@@ -1772,7 +1772,7 @@ void test_vluxseg2ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_tumu(
@@ -1785,7 +1785,7 @@ void test_vluxseg2ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_tumu(
@@ -1798,7 +1798,7 @@ void test_vluxseg2ei8_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_tumu(
@@ -1811,7 +1811,7 @@ void test_vluxseg2ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_tumu(
@@ -1824,7 +1824,7 @@ void test_vluxseg2ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_tumu(
@@ -1837,7 +1837,7 @@ void test_vluxseg2ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_tumu(
@@ -1850,7 +1850,7 @@ void test_vluxseg2ei8_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_tumu(
@@ -1863,7 +1863,7 @@ void test_vluxseg2ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_tumu(
@@ -1876,7 +1876,7 @@ void test_vluxseg2ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_mu(
@@ -1889,7 +1889,7 @@ void test_vluxseg2ei8_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_mu(
@@ -1902,7 +1902,7 @@ void test_vluxseg2ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vluxseg2ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_mu(
@@ -1928,7 +1928,7 @@ void test_vluxseg2ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_mu(
@@ -1941,7 +1941,7 @@ void test_vluxseg2ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vluxseg2ei8_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_mu(
@@ -1967,7 +1967,7 @@ void test_vluxseg2ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_mu(
@@ -1980,7 +1980,7 @@ void test_vluxseg2ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_mu(
@@ -1993,7 +1993,7 @@ void test_vluxseg2ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_mu(
@@ -2006,7 +2006,7 @@ void test_vluxseg2ei8_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_mu(
@@ -2019,7 +2019,7 @@ void test_vluxseg2ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_mu(
@@ -2032,7 +2032,7 @@ void test_vluxseg2ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_mu(
@@ -2045,7 +2045,7 @@ void test_vluxseg2ei8_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_mu(
@@ -2058,7 +2058,7 @@ void test_vluxseg2ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_mu(
@@ -2071,7 +2071,7 @@ void test_vluxseg2ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_mu(
@@ -2084,7 +2084,7 @@ void test_vluxseg2ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_mu(
@@ -2097,7 +2097,7 @@ void test_vluxseg2ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_mu(
@@ -2110,7 +2110,7 @@ void test_vluxseg2ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_mu(
@@ -2123,7 +2123,7 @@ void test_vluxseg2ei8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_mu(
@@ -2136,7 +2136,7 @@ void test_vluxseg2ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_mu(
@@ -2149,7 +2149,7 @@ void test_vluxseg2ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_mu(
@@ -2162,7 +2162,7 @@ void test_vluxseg2ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_mu(
@@ -2175,7 +2175,7 @@ void test_vluxseg2ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_mu(
@@ -2188,7 +2188,7 @@ void test_vluxseg2ei8_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_mu(
@@ -2201,7 +2201,7 @@ void test_vluxseg2ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_mu(
@@ -2214,7 +2214,7 @@ void test_vluxseg2ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_mu(
@@ -2227,7 +2227,7 @@ void test_vluxseg2ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_mu(
@@ -2240,7 +2240,7 @@ void test_vluxseg2ei8_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_mu(
@@ -2253,7 +2253,7 @@ void test_vluxseg2ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_mu(
@@ -2266,7 +2266,7 @@ void test_vluxseg2ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_mu(
@@ -2279,7 +2279,7 @@ void test_vluxseg2ei8_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_mu(
@@ -2292,7 +2292,7 @@ void test_vluxseg2ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_mu(
@@ -2305,7 +2305,7 @@ void test_vluxseg2ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_mu(
@@ -2318,7 +2318,7 @@ void test_vluxseg2ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_mu(
@@ -2331,7 +2331,7 @@ void test_vluxseg2ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_mu(
@@ -2344,7 +2344,7 @@ void test_vluxseg2ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_mu(
@@ -2357,7 +2357,7 @@ void test_vluxseg2ei8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_mu(
@@ -2370,7 +2370,7 @@ void test_vluxseg2ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_mu(
@@ -2383,7 +2383,7 @@ void test_vluxseg2ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_mu(
@@ -2396,7 +2396,7 @@ void test_vluxseg2ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_mu(
@@ -2409,7 +2409,7 @@ void test_vluxseg2ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_mu(
@@ -2422,7 +2422,7 @@ void test_vluxseg2ei8_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_mu(
@@ -2435,7 +2435,7 @@ void test_vluxseg2ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_mu(
@@ -2448,7 +2448,7 @@ void test_vluxseg2ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_mu(
@@ -2461,7 +2461,7 @@ void test_vluxseg2ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_mu(
@@ -2474,7 +2474,7 @@ void test_vluxseg2ei8_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_mu(
@@ -2487,7 +2487,7 @@ void test_vluxseg2ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_mu(
@@ -2500,6 +2500,6 @@ void test_vluxseg2ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg2ei8_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+ return __riscv_vluxseg2ei8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
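// Editorial note: every hunk in the file above applies the same mechanical
// rename, adding the `__riscv_` prefix to the overloaded policy intrinsics
// (`_tu`, `_tum`, `_tumu`, `_mu`). A minimal sketch of the new spelling in
// user code follows; the wrapper name, argument names, and the compile
// command are illustrative assumptions, not part of this patch. The intrinsic
// call itself mirrors the i32m1 `_tumu` test case above.
//
//   #include <riscv_vector.h>
//
//   void load_two_fields(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
//                        vint32m1_t off0, vint32m1_t off1,
//                        const int32_t *base, vuint8mf4_t bindex, size_t vl) {
//     // Overload resolution selects the i32m1 variant from the argument
//     // types; the `_tumu` policy keeps tail and masked-off elements from
//     // off0/off1 instead of leaving them undefined.
//     __riscv_vluxseg2ei8_tumu(v0, v1, mask, off0, off1, base, bindex, vl);
//   }
//
//   // e.g. (assumed flags): clang --target=riscv64 -march=rv64gcv -O2 -c example.c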
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c
index 98ad37b8d9af..30b97b32ee2b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vluxseg3ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vluxseg3ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vluxseg3ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_tu(
@@ -79,7 +79,7 @@ void test_vluxseg3ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_tu(
@@ -94,7 +94,7 @@ void test_vluxseg3ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_tu(
@@ -109,7 +109,7 @@ void test_vluxseg3ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_tu(
@@ -124,7 +124,7 @@ void test_vluxseg3ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_tu(
@@ -139,7 +139,7 @@ void test_vluxseg3ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_tu(
@@ -154,7 +154,7 @@ void test_vluxseg3ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_tu(
@@ -169,7 +169,7 @@ void test_vluxseg3ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_tu(
@@ -184,7 +184,7 @@ void test_vluxseg3ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vluxseg3ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_tu(
@@ -214,7 +214,7 @@ void test_vluxseg3ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_tu(
@@ -229,7 +229,7 @@ void test_vluxseg3ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_tu(
@@ -244,7 +244,7 @@ void test_vluxseg3ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_tu(
@@ -259,7 +259,7 @@ void test_vluxseg3ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_tu(
@@ -274,7 +274,7 @@ void test_vluxseg3ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_tu(
@@ -289,7 +289,7 @@ void test_vluxseg3ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_tu(
@@ -304,7 +304,7 @@ void test_vluxseg3ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_tu(
@@ -319,7 +319,7 @@ void test_vluxseg3ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_tu(
@@ -334,7 +334,7 @@ void test_vluxseg3ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_tu(
@@ -349,7 +349,7 @@ void test_vluxseg3ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_tu(
@@ -364,7 +364,7 @@ void test_vluxseg3ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_tu(
@@ -379,7 +379,7 @@ void test_vluxseg3ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_tu(
@@ -394,7 +394,7 @@ void test_vluxseg3ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_tu(
@@ -409,7 +409,7 @@ void test_vluxseg3ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_tu(
@@ -424,7 +424,7 @@ void test_vluxseg3ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_tu(
@@ -439,7 +439,7 @@ void test_vluxseg3ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_tu(
@@ -454,7 +454,7 @@ void test_vluxseg3ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_tu(
@@ -469,7 +469,7 @@ void test_vluxseg3ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_tu(
@@ -484,7 +484,7 @@ void test_vluxseg3ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_tu(
@@ -499,7 +499,7 @@ void test_vluxseg3ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_tu(
@@ -514,7 +514,7 @@ void test_vluxseg3ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_tu(
@@ -529,7 +529,7 @@ void test_vluxseg3ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_tu(
@@ -544,7 +544,7 @@ void test_vluxseg3ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_tu(
@@ -559,7 +559,7 @@ void test_vluxseg3ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_tum(
@@ -574,7 +574,7 @@ void test_vluxseg3ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_tum(
@@ -589,7 +589,7 @@ void test_vluxseg3ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_tum(
@@ -604,7 +604,7 @@ void test_vluxseg3ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_tum(
@@ -619,7 +619,7 @@ void test_vluxseg3ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vluxseg3ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_tum(
@@ -649,7 +649,7 @@ void test_vluxseg3ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_tum(
@@ -664,7 +664,7 @@ void test_vluxseg3ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_tum(
@@ -679,7 +679,7 @@ void test_vluxseg3ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_tum(
@@ -694,7 +694,7 @@ void test_vluxseg3ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_tum(
@@ -709,7 +709,7 @@ void test_vluxseg3ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_tum(
@@ -724,7 +724,7 @@ void test_vluxseg3ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vluxseg3ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_tum(
@@ -754,7 +754,7 @@ void test_vluxseg3ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_tum(
@@ -769,7 +769,7 @@ void test_vluxseg3ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_tum(
@@ -784,7 +784,7 @@ void test_vluxseg3ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_tum(
@@ -799,7 +799,7 @@ void test_vluxseg3ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_tum(
@@ -814,7 +814,7 @@ void test_vluxseg3ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_tum(
@@ -829,7 +829,7 @@ void test_vluxseg3ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vluxseg3ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_tum(
@@ -859,7 +859,7 @@ void test_vluxseg3ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_tum(
@@ -874,7 +874,7 @@ void test_vluxseg3ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_tum(
@@ -889,7 +889,7 @@ void test_vluxseg3ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_tum(
@@ -904,7 +904,7 @@ void test_vluxseg3ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_tum(
@@ -919,7 +919,7 @@ void test_vluxseg3ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_tum(
@@ -934,7 +934,7 @@ void test_vluxseg3ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vluxseg3ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_tum(
@@ -964,7 +964,7 @@ void test_vluxseg3ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_tum(
@@ -979,7 +979,7 @@ void test_vluxseg3ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_tum(
@@ -994,7 +994,7 @@ void test_vluxseg3ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_tum(
@@ -1009,7 +1009,7 @@ void test_vluxseg3ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_tum(
@@ -1024,7 +1024,7 @@ void test_vluxseg3ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_tum(
@@ -1039,7 +1039,7 @@ void test_vluxseg3ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg3ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_tum(
@@ -1069,7 +1069,7 @@ void test_vluxseg3ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_tum(
@@ -1084,7 +1084,7 @@ void test_vluxseg3ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_tum(
@@ -1099,7 +1099,7 @@ void test_vluxseg3ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_tum(
@@ -1114,7 +1114,7 @@ void test_vluxseg3ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_tumu(
@@ -1129,7 +1129,7 @@ void test_vluxseg3ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_tumu(
@@ -1144,7 +1144,7 @@ void test_vluxseg3ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vluxseg3ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_tumu(
@@ -1174,7 +1174,7 @@ void test_vluxseg3ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_tumu(
@@ -1189,7 +1189,7 @@ void test_vluxseg3ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_tumu(
@@ -1204,7 +1204,7 @@ void test_vluxseg3ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_tumu(
@@ -1219,7 +1219,7 @@ void test_vluxseg3ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_tumu(
@@ -1234,7 +1234,7 @@ void test_vluxseg3ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_tumu(
@@ -1249,7 +1249,7 @@ void test_vluxseg3ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_tumu(
@@ -1264,7 +1264,7 @@ void test_vluxseg3ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vluxseg3ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_tumu(
@@ -1294,7 +1294,7 @@ void test_vluxseg3ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_tumu(
@@ -1309,7 +1309,7 @@ void test_vluxseg3ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_tumu(
@@ -1324,7 +1324,7 @@ void test_vluxseg3ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_tumu(
@@ -1339,7 +1339,7 @@ void test_vluxseg3ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vluxseg3ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg3ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_tumu(
@@ -1384,7 +1384,7 @@ void test_vluxseg3ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_tumu(
@@ -1399,7 +1399,7 @@ void test_vluxseg3ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_tumu(
@@ -1414,7 +1414,7 @@ void test_vluxseg3ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg3ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_tumu(
@@ -1444,7 +1444,7 @@ void test_vluxseg3ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_tumu(
@@ -1459,7 +1459,7 @@ void test_vluxseg3ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_tumu(
@@ -1474,7 +1474,7 @@ void test_vluxseg3ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_tumu(
@@ -1489,7 +1489,7 @@ void test_vluxseg3ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_tumu(
@@ -1504,7 +1504,7 @@ void test_vluxseg3ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_tumu(
@@ -1519,7 +1519,7 @@ void test_vluxseg3ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_tumu(
@@ -1534,7 +1534,7 @@ void test_vluxseg3ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_tumu(
@@ -1549,7 +1549,7 @@ void test_vluxseg3ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vluxseg3ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg3ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_tumu(
@@ -1594,7 +1594,7 @@ void test_vluxseg3ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_tumu(
@@ -1609,7 +1609,7 @@ void test_vluxseg3ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_tumu(
@@ -1624,7 +1624,7 @@ void test_vluxseg3ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_tumu(
@@ -1639,7 +1639,7 @@ void test_vluxseg3ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_tumu(
@@ -1654,7 +1654,7 @@ void test_vluxseg3ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_tumu(
@@ -1669,7 +1669,7 @@ void test_vluxseg3ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_mu(
@@ -1684,7 +1684,7 @@ void test_vluxseg3ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_mu(
@@ -1699,7 +1699,7 @@ void test_vluxseg3ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_mu(
@@ -1714,7 +1714,7 @@ void test_vluxseg3ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_mu(
@@ -1729,7 +1729,7 @@ void test_vluxseg3ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_mu(
@@ -1744,7 +1744,7 @@ void test_vluxseg3ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_mu(
@@ -1759,7 +1759,7 @@ void test_vluxseg3ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_mu(
@@ -1774,7 +1774,7 @@ void test_vluxseg3ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_mu(
@@ -1789,7 +1789,7 @@ void test_vluxseg3ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_mu(
@@ -1804,7 +1804,7 @@ void test_vluxseg3ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_mu(
@@ -1819,7 +1819,7 @@ void test_vluxseg3ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_mu(
@@ -1834,7 +1834,7 @@ void test_vluxseg3ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_mu(
@@ -1849,7 +1849,7 @@ void test_vluxseg3ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_mu(
@@ -1864,7 +1864,7 @@ void test_vluxseg3ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_mu(
@@ -1879,7 +1879,7 @@ void test_vluxseg3ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_mu(
@@ -1894,7 +1894,7 @@ void test_vluxseg3ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_mu(
@@ -1909,7 +1909,7 @@ void test_vluxseg3ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_mu(
@@ -1924,7 +1924,7 @@ void test_vluxseg3ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_mu(
@@ -1939,7 +1939,7 @@ void test_vluxseg3ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vluxseg3ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_mu(
@@ -1969,7 +1969,7 @@ void test_vluxseg3ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_mu(
@@ -1984,7 +1984,7 @@ void test_vluxseg3ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_mu(
@@ -1999,7 +1999,7 @@ void test_vluxseg3ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_mu(
@@ -2014,7 +2014,7 @@ void test_vluxseg3ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_mu(
@@ -2029,7 +2029,7 @@ void test_vluxseg3ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_mu(
@@ -2044,7 +2044,7 @@ void test_vluxseg3ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_mu(
@@ -2059,7 +2059,7 @@ void test_vluxseg3ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_mu(
@@ -2074,7 +2074,7 @@ void test_vluxseg3ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_mu(
@@ -2089,7 +2089,7 @@ void test_vluxseg3ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg3ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_mu(
@@ -2119,7 +2119,7 @@ void test_vluxseg3ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_mu(
@@ -2134,7 +2134,7 @@ void test_vluxseg3ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_mu(
@@ -2149,7 +2149,7 @@ void test_vluxseg3ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_mu(
@@ -2164,7 +2164,7 @@ void test_vluxseg3ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_mu(
@@ -2179,7 +2179,7 @@ void test_vluxseg3ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_mu(
@@ -2194,7 +2194,7 @@ void test_vluxseg3ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_mu(
@@ -2209,7 +2209,7 @@ void test_vluxseg3ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_mu(
@@ -2224,6 +2224,6 @@ void test_vluxseg3ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c
index 7b65b5774ad2..af2a2351710a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vluxseg3ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vluxseg3ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vluxseg3ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_tu(
@@ -79,7 +79,7 @@ void test_vluxseg3ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_tu(
@@ -94,7 +94,7 @@ void test_vluxseg3ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_tu(
@@ -109,7 +109,7 @@ void test_vluxseg3ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_tu(
@@ -124,7 +124,7 @@ void test_vluxseg3ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_tu(
@@ -139,7 +139,7 @@ void test_vluxseg3ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_tu(
@@ -154,7 +154,7 @@ void test_vluxseg3ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_tu(
@@ -169,7 +169,7 @@ void test_vluxseg3ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_tu(
@@ -184,7 +184,7 @@ void test_vluxseg3ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vluxseg3ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_tu(
@@ -214,7 +214,7 @@ void test_vluxseg3ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_tu(
@@ -229,7 +229,7 @@ void test_vluxseg3ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_tu(
@@ -244,7 +244,7 @@ void test_vluxseg3ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_tu(
@@ -259,7 +259,7 @@ void test_vluxseg3ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_tu(
@@ -274,7 +274,7 @@ void test_vluxseg3ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_tu(
@@ -289,7 +289,7 @@ void test_vluxseg3ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_tu(
@@ -304,7 +304,7 @@ void test_vluxseg3ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_tu(
@@ -319,7 +319,7 @@ void test_vluxseg3ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_tu(
@@ -334,7 +334,7 @@ void test_vluxseg3ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_tu(
@@ -349,7 +349,7 @@ void test_vluxseg3ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_tu(
@@ -364,7 +364,7 @@ void test_vluxseg3ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_tu(
@@ -379,7 +379,7 @@ void test_vluxseg3ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_tu(
@@ -394,7 +394,7 @@ void test_vluxseg3ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_tu(
@@ -409,7 +409,7 @@ void test_vluxseg3ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_tu(
@@ -424,7 +424,7 @@ void test_vluxseg3ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_tu(
@@ -439,7 +439,7 @@ void test_vluxseg3ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_tu(
@@ -454,7 +454,7 @@ void test_vluxseg3ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_tu(
@@ -469,7 +469,7 @@ void test_vluxseg3ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_tu(
@@ -484,7 +484,7 @@ void test_vluxseg3ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_tu(
@@ -499,7 +499,7 @@ void test_vluxseg3ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_tu(
@@ -514,7 +514,7 @@ void test_vluxseg3ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_tu(
@@ -529,7 +529,7 @@ void test_vluxseg3ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_tu(
@@ -544,7 +544,7 @@ void test_vluxseg3ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_tu(
@@ -559,7 +559,7 @@ void test_vluxseg3ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_tum(
@@ -574,7 +574,7 @@ void test_vluxseg3ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_tum(
@@ -589,7 +589,7 @@ void test_vluxseg3ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_tum(
@@ -604,7 +604,7 @@ void test_vluxseg3ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_tum(
@@ -619,7 +619,7 @@ void test_vluxseg3ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vluxseg3ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_tum(
@@ -649,7 +649,7 @@ void test_vluxseg3ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_tum(
@@ -664,7 +664,7 @@ void test_vluxseg3ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_tum(
@@ -679,7 +679,7 @@ void test_vluxseg3ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_tum(
@@ -694,7 +694,7 @@ void test_vluxseg3ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_tum(
@@ -709,7 +709,7 @@ void test_vluxseg3ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_tum(
@@ -724,7 +724,7 @@ void test_vluxseg3ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vluxseg3ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_tum(
@@ -754,7 +754,7 @@ void test_vluxseg3ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_tum(
@@ -769,7 +769,7 @@ void test_vluxseg3ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_tum(
@@ -784,7 +784,7 @@ void test_vluxseg3ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_tum(
@@ -799,7 +799,7 @@ void test_vluxseg3ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_tum(
@@ -814,7 +814,7 @@ void test_vluxseg3ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_tum(
@@ -829,7 +829,7 @@ void test_vluxseg3ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vluxseg3ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_tum(
@@ -859,7 +859,7 @@ void test_vluxseg3ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_tum(
@@ -874,7 +874,7 @@ void test_vluxseg3ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_tum(
@@ -889,7 +889,7 @@ void test_vluxseg3ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_tum(
@@ -904,7 +904,7 @@ void test_vluxseg3ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_tum(
@@ -919,7 +919,7 @@ void test_vluxseg3ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_tum(
@@ -934,7 +934,7 @@ void test_vluxseg3ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vluxseg3ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_tum(
@@ -964,7 +964,7 @@ void test_vluxseg3ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_tum(
@@ -979,7 +979,7 @@ void test_vluxseg3ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_tum(
@@ -994,7 +994,7 @@ void test_vluxseg3ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_tum(
@@ -1009,7 +1009,7 @@ void test_vluxseg3ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_tum(
@@ -1024,7 +1024,7 @@ void test_vluxseg3ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_tum(
@@ -1039,7 +1039,7 @@ void test_vluxseg3ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg3ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_tum(
@@ -1069,7 +1069,7 @@ void test_vluxseg3ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_tum(
@@ -1084,7 +1084,7 @@ void test_vluxseg3ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_tum(
@@ -1099,7 +1099,7 @@ void test_vluxseg3ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_tum(
@@ -1114,7 +1114,7 @@ void test_vluxseg3ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_tumu(
@@ -1129,7 +1129,7 @@ void test_vluxseg3ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_tumu(
@@ -1144,7 +1144,7 @@ void test_vluxseg3ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vluxseg3ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_tumu(
@@ -1174,7 +1174,7 @@ void test_vluxseg3ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_tumu(
@@ -1189,7 +1189,7 @@ void test_vluxseg3ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_tumu(
@@ -1204,7 +1204,7 @@ void test_vluxseg3ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_tumu(
@@ -1219,7 +1219,7 @@ void test_vluxseg3ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_tumu(
@@ -1234,7 +1234,7 @@ void test_vluxseg3ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_tumu(
@@ -1249,7 +1249,7 @@ void test_vluxseg3ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_tumu(
@@ -1264,7 +1264,7 @@ void test_vluxseg3ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vluxseg3ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_tumu(
@@ -1294,7 +1294,7 @@ void test_vluxseg3ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_tumu(
@@ -1309,7 +1309,7 @@ void test_vluxseg3ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_tumu(
@@ -1324,7 +1324,7 @@ void test_vluxseg3ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_tumu(
@@ -1339,7 +1339,7 @@ void test_vluxseg3ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vluxseg3ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg3ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_tumu(
@@ -1384,7 +1384,7 @@ void test_vluxseg3ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_tumu(
@@ -1399,7 +1399,7 @@ void test_vluxseg3ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_tumu(
@@ -1414,7 +1414,7 @@ void test_vluxseg3ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg3ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_tumu(
@@ -1444,7 +1444,7 @@ void test_vluxseg3ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_tumu(
@@ -1459,7 +1459,7 @@ void test_vluxseg3ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_tumu(
@@ -1474,7 +1474,7 @@ void test_vluxseg3ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_tumu(
@@ -1489,7 +1489,7 @@ void test_vluxseg3ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_tumu(
@@ -1504,7 +1504,7 @@ void test_vluxseg3ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_tumu(
@@ -1519,7 +1519,7 @@ void test_vluxseg3ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_tumu(
@@ -1534,7 +1534,7 @@ void test_vluxseg3ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_tumu(
@@ -1549,7 +1549,7 @@ void test_vluxseg3ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vluxseg3ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg3ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_tumu(
@@ -1594,7 +1594,7 @@ void test_vluxseg3ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_tumu(
@@ -1609,7 +1609,7 @@ void test_vluxseg3ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_tumu(
@@ -1624,7 +1624,7 @@ void test_vluxseg3ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_tumu(
@@ -1639,7 +1639,7 @@ void test_vluxseg3ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_tumu(
@@ -1654,7 +1654,7 @@ void test_vluxseg3ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_tumu(
@@ -1669,7 +1669,7 @@ void test_vluxseg3ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_mu(
@@ -1684,7 +1684,7 @@ void test_vluxseg3ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_mu(
@@ -1699,7 +1699,7 @@ void test_vluxseg3ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_mu(
@@ -1714,7 +1714,7 @@ void test_vluxseg3ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_mu(
@@ -1729,7 +1729,7 @@ void test_vluxseg3ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_mu(
@@ -1744,7 +1744,7 @@ void test_vluxseg3ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_mu(
@@ -1759,7 +1759,7 @@ void test_vluxseg3ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_mu(
@@ -1774,7 +1774,7 @@ void test_vluxseg3ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_mu(
@@ -1789,7 +1789,7 @@ void test_vluxseg3ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_mu(
@@ -1804,7 +1804,7 @@ void test_vluxseg3ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_mu(
@@ -1819,7 +1819,7 @@ void test_vluxseg3ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_mu(
@@ -1834,7 +1834,7 @@ void test_vluxseg3ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_mu(
@@ -1849,7 +1849,7 @@ void test_vluxseg3ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_mu(
@@ -1864,7 +1864,7 @@ void test_vluxseg3ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_mu(
@@ -1879,7 +1879,7 @@ void test_vluxseg3ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_mu(
@@ -1894,7 +1894,7 @@ void test_vluxseg3ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_mu(
@@ -1909,7 +1909,7 @@ void test_vluxseg3ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_mu(
@@ -1924,7 +1924,7 @@ void test_vluxseg3ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_mu(
@@ -1939,7 +1939,7 @@ void test_vluxseg3ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vluxseg3ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_mu(
@@ -1969,7 +1969,7 @@ void test_vluxseg3ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_mu(
@@ -1984,7 +1984,7 @@ void test_vluxseg3ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_mu(
@@ -1999,7 +1999,7 @@ void test_vluxseg3ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_mu(
@@ -2014,7 +2014,7 @@ void test_vluxseg3ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_mu(
@@ -2029,7 +2029,7 @@ void test_vluxseg3ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_mu(
@@ -2044,7 +2044,7 @@ void test_vluxseg3ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_mu(
@@ -2059,7 +2059,7 @@ void test_vluxseg3ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_mu(
@@ -2074,7 +2074,7 @@ void test_vluxseg3ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_mu(
@@ -2089,7 +2089,7 @@ void test_vluxseg3ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg3ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_mu(
@@ -2119,7 +2119,7 @@ void test_vluxseg3ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_mu(
@@ -2134,7 +2134,7 @@ void test_vluxseg3ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_mu(
@@ -2149,7 +2149,7 @@ void test_vluxseg3ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_mu(
@@ -2164,7 +2164,7 @@ void test_vluxseg3ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_mu(
@@ -2179,7 +2179,7 @@ void test_vluxseg3ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_mu(
@@ -2194,7 +2194,7 @@ void test_vluxseg3ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_mu(
@@ -2209,7 +2209,7 @@ void test_vluxseg3ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_mu(
@@ -2224,6 +2224,6 @@ void test_vluxseg3ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
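The hunks above are a pure rename: the overloaded _mu (mask-undisturbed) indexed segment loads keep their argument lists and semantics, and only gain the __riscv_ prefix. A minimal migration sketch for a caller, assuming a hypothetical gather3 function and an RVV-enabled compiler (e.g. -march=rv64gcv); the intrinsic signature mirrors the u32m1 test above:

#include <riscv_vector.h>

// Hypothetical caller; all names here are illustrative, not from the patch.
void gather3(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
             vbool32_t mask, vuint32m1_t off0, vuint32m1_t off1,
             vuint32m1_t off2, const uint32_t *base, vuint32m1_t bindex,
             size_t vl) {
  // Old spelling: vluxseg3ei32_mu(v0, v1, v2, mask, off0, off1, off2, base, bindex, vl);
  // New spelling; overload resolution is unchanged, only the name differs:
  __riscv_vluxseg3ei32_mu(v0, v1, v2, mask, off0, off1, off2, base, bindex, vl);
}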
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c
index ce8058057e70..ba9252777f5b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vluxseg3ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vluxseg3ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vluxseg3ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_tu(
@@ -79,7 +79,7 @@ void test_vluxseg3ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_tu(
@@ -94,7 +94,7 @@ void test_vluxseg3ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_tu(
@@ -109,7 +109,7 @@ void test_vluxseg3ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_tu(
@@ -124,7 +124,7 @@ void test_vluxseg3ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_tu(
@@ -139,7 +139,7 @@ void test_vluxseg3ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_tu(
@@ -154,7 +154,7 @@ void test_vluxseg3ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_tu(
@@ -169,7 +169,7 @@ void test_vluxseg3ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_tu(
@@ -184,7 +184,7 @@ void test_vluxseg3ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vluxseg3ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_tu(
@@ -214,7 +214,7 @@ void test_vluxseg3ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_tu(
@@ -229,7 +229,7 @@ void test_vluxseg3ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_tu(
@@ -244,7 +244,7 @@ void test_vluxseg3ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_tu(
@@ -259,7 +259,7 @@ void test_vluxseg3ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_tu(
@@ -274,7 +274,7 @@ void test_vluxseg3ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vluxseg3ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_tu(
@@ -304,7 +304,7 @@ void test_vluxseg3ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_tu(
@@ -319,7 +319,7 @@ void test_vluxseg3ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_tu(
@@ -334,7 +334,7 @@ void test_vluxseg3ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_tu(
@@ -349,7 +349,7 @@ void test_vluxseg3ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_tu(
@@ -364,7 +364,7 @@ void test_vluxseg3ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_tu(
@@ -379,7 +379,7 @@ void test_vluxseg3ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_tu(
@@ -394,7 +394,7 @@ void test_vluxseg3ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_tu(
@@ -409,7 +409,7 @@ void test_vluxseg3ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_tu(
@@ -424,7 +424,7 @@ void test_vluxseg3ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_tu(
@@ -439,7 +439,7 @@ void test_vluxseg3ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_tu(
@@ -454,7 +454,7 @@ void test_vluxseg3ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_tu(
@@ -469,7 +469,7 @@ void test_vluxseg3ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_tu(
@@ -484,7 +484,7 @@ void test_vluxseg3ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_tu(
@@ -499,7 +499,7 @@ void test_vluxseg3ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_tu(
@@ -514,7 +514,7 @@ void test_vluxseg3ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_tu(
@@ -529,7 +529,7 @@ void test_vluxseg3ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
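From here the file moves from the unmasked _tu tests to the masked policy variants: per the rvv-intrinsic-doc naming, _tu keeps tail elements undisturbed, _tum adds a mask with mask-agnostic inactive elements, and _tumu keeps both tail and inactive elements undisturbed. Two patterns in the signatures are worth noting: the bindex type scales with the index EEW (index LMUL = data LMUL x 64/SEW for ei64, so vuint8mf8_t data pairs with vuint64m1_t indices, mf8 x 8 = m1, and vuint16m2_t data with vuint64m8_t, m2 x 4 = m8), and data LMUL stops at m2 because a 3-field segment load must satisfy NFIELDS x LMUL <= 8 vector registers.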
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_tum(
@@ -544,7 +544,7 @@ void test_vluxseg3ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_tum(
@@ -559,7 +559,7 @@ void test_vluxseg3ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_tum(
@@ -574,7 +574,7 @@ void test_vluxseg3ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_tum(
@@ -589,7 +589,7 @@ void test_vluxseg3ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_tum(
@@ -604,7 +604,7 @@ void test_vluxseg3ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_tum(
@@ -619,7 +619,7 @@ void test_vluxseg3ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_tum(
@@ -634,7 +634,7 @@ void test_vluxseg3ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_tum(
@@ -649,7 +649,7 @@ void test_vluxseg3ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_tum(
@@ -664,7 +664,7 @@ void test_vluxseg3ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_tum(
@@ -679,7 +679,7 @@ void test_vluxseg3ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_tum(
@@ -694,7 +694,7 @@ void test_vluxseg3ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_tum(
@@ -709,7 +709,7 @@ void test_vluxseg3ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_tum(
@@ -724,7 +724,7 @@ void test_vluxseg3ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_tum(
@@ -739,7 +739,7 @@ void test_vluxseg3ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_tum(
@@ -754,7 +754,7 @@ void test_vluxseg3ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_tum(
@@ -769,7 +769,7 @@ void test_vluxseg3ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_tum(
@@ -784,7 +784,7 @@ void test_vluxseg3ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_tum(
@@ -799,7 +799,7 @@ void test_vluxseg3ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_tum(
@@ -814,7 +814,7 @@ void test_vluxseg3ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_tum(
@@ -829,7 +829,7 @@ void test_vluxseg3ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_tum(
@@ -844,7 +844,7 @@ void test_vluxseg3ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_tum(
@@ -859,7 +859,7 @@ void test_vluxseg3ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_tum(
@@ -874,7 +874,7 @@ void test_vluxseg3ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_tum(
@@ -889,7 +889,7 @@ void test_vluxseg3ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_tum(
@@ -904,7 +904,7 @@ void test_vluxseg3ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_tum(
@@ -919,7 +919,7 @@ void test_vluxseg3ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_tum(
@@ -934,7 +934,7 @@ void test_vluxseg3ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_tum(
@@ -949,7 +949,7 @@ void test_vluxseg3ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_tum(
@@ -964,7 +964,7 @@ void test_vluxseg3ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_tum(
@@ -979,7 +979,7 @@ void test_vluxseg3ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_tum(
@@ -994,7 +994,7 @@ void test_vluxseg3ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_tum(
@@ -1009,7 +1009,7 @@ void test_vluxseg3ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_tum(
@@ -1024,7 +1024,7 @@ void test_vluxseg3ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_tum(
@@ -1039,7 +1039,7 @@ void test_vluxseg3ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg3ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_tumu(
@@ -1069,7 +1069,7 @@ void test_vluxseg3ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_tumu(
@@ -1084,7 +1084,7 @@ void test_vluxseg3ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_tumu(
@@ -1099,7 +1099,7 @@ void test_vluxseg3ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_tumu(
@@ -1114,7 +1114,7 @@ void test_vluxseg3ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_tumu(
@@ -1129,7 +1129,7 @@ void test_vluxseg3ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_tumu(
@@ -1144,7 +1144,7 @@ void test_vluxseg3ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_tumu(
@@ -1159,7 +1159,7 @@ void test_vluxseg3ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_tumu(
@@ -1174,7 +1174,7 @@ void test_vluxseg3ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_tumu(
@@ -1189,7 +1189,7 @@ void test_vluxseg3ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_tumu(
@@ -1204,7 +1204,7 @@ void test_vluxseg3ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_tumu(
@@ -1219,7 +1219,7 @@ void test_vluxseg3ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_tumu(
@@ -1234,7 +1234,7 @@ void test_vluxseg3ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_tumu(
@@ -1249,7 +1249,7 @@ void test_vluxseg3ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vluxseg3ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_tumu(
@@ -1279,7 +1279,7 @@ void test_vluxseg3ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_tumu(
@@ -1294,7 +1294,7 @@ void test_vluxseg3ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_tumu(
@@ -1309,7 +1309,7 @@ void test_vluxseg3ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_tumu(
@@ -1324,7 +1324,7 @@ void test_vluxseg3ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_tumu(
@@ -1339,7 +1339,7 @@ void test_vluxseg3ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_tumu(
@@ -1354,7 +1354,7 @@ void test_vluxseg3ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg3ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_tumu(
@@ -1384,7 +1384,7 @@ void test_vluxseg3ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_tumu(
@@ -1399,7 +1399,7 @@ void test_vluxseg3ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_tumu(
@@ -1414,7 +1414,7 @@ void test_vluxseg3ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg3ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_tumu(
@@ -1444,7 +1444,7 @@ void test_vluxseg3ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_tumu(
@@ -1459,7 +1459,7 @@ void test_vluxseg3ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_tumu(
@@ -1474,7 +1474,7 @@ void test_vluxseg3ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_tumu(
@@ -1489,7 +1489,7 @@ void test_vluxseg3ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_tumu(
@@ -1504,7 +1504,7 @@ void test_vluxseg3ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_tumu(
@@ -1519,7 +1519,7 @@ void test_vluxseg3ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_tumu(
@@ -1534,7 +1534,7 @@ void test_vluxseg3ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_tumu(
@@ -1549,7 +1549,7 @@ void test_vluxseg3ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_tumu(
@@ -1564,7 +1564,7 @@ void test_vluxseg3ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg3ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_mu(
@@ -1594,7 +1594,7 @@ void test_vluxseg3ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_mu(
@@ -1609,7 +1609,7 @@ void test_vluxseg3ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_mu(
@@ -1624,7 +1624,7 @@ void test_vluxseg3ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_mu(
@@ -1639,7 +1639,7 @@ void test_vluxseg3ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_mu(
@@ -1654,7 +1654,7 @@ void test_vluxseg3ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_mu(
@@ -1669,7 +1669,7 @@ void test_vluxseg3ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_mu(
@@ -1684,7 +1684,7 @@ void test_vluxseg3ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_mu(
@@ -1699,7 +1699,7 @@ void test_vluxseg3ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_mu(
@@ -1714,7 +1714,7 @@ void test_vluxseg3ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_mu(
@@ -1729,7 +1729,7 @@ void test_vluxseg3ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_mu(
@@ -1744,7 +1744,7 @@ void test_vluxseg3ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_mu(
@@ -1759,7 +1759,7 @@ void test_vluxseg3ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_mu(
@@ -1774,7 +1774,7 @@ void test_vluxseg3ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_mu(
@@ -1789,7 +1789,7 @@ void test_vluxseg3ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_mu(
@@ -1804,7 +1804,7 @@ void test_vluxseg3ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_mu(
@@ -1819,7 +1819,7 @@ void test_vluxseg3ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_mu(
@@ -1834,7 +1834,7 @@ void test_vluxseg3ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_mu(
@@ -1849,7 +1849,7 @@ void test_vluxseg3ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_mu(
@@ -1864,7 +1864,7 @@ void test_vluxseg3ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_mu(
@@ -1879,7 +1879,7 @@ void test_vluxseg3ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_mu(
@@ -1894,7 +1894,7 @@ void test_vluxseg3ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_mu(
@@ -1909,7 +1909,7 @@ void test_vluxseg3ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_mu(
@@ -1924,7 +1924,7 @@ void test_vluxseg3ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_mu(
@@ -1939,7 +1939,7 @@ void test_vluxseg3ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vluxseg3ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_mu(
@@ -1969,7 +1969,7 @@ void test_vluxseg3ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_mu(
@@ -1984,7 +1984,7 @@ void test_vluxseg3ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_mu(
@@ -1999,7 +1999,7 @@ void test_vluxseg3ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_mu(
@@ -2014,7 +2014,7 @@ void test_vluxseg3ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_mu(
@@ -2029,7 +2029,7 @@ void test_vluxseg3ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_mu(
@@ -2044,7 +2044,7 @@ void test_vluxseg3ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_mu(
@@ -2059,7 +2059,7 @@ void test_vluxseg3ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_mu(
@@ -2074,7 +2074,7 @@ void test_vluxseg3ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_mu(
@@ -2089,7 +2089,7 @@ void test_vluxseg3ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_mu(
@@ -2104,6 +2104,6 @@ void test_vluxseg3ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c
index eae9d3433a7d..1f47591cc736 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c
@@ -19,7 +19,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_tu(
@@ -34,7 +34,7 @@ void test_vluxseg3ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_tu(
@@ -49,7 +49,7 @@ void test_vluxseg3ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_tu(
@@ -64,7 +64,7 @@ void test_vluxseg3ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_tu(
@@ -79,7 +79,7 @@ void test_vluxseg3ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_tu(
@@ -94,7 +94,7 @@ void test_vluxseg3ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_tu(
@@ -109,7 +109,7 @@ void test_vluxseg3ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_tu(
@@ -124,7 +124,7 @@ void test_vluxseg3ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_tu(
@@ -139,7 +139,7 @@ void test_vluxseg3ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_tu(
@@ -154,7 +154,7 @@ void test_vluxseg3ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_tu(
@@ -169,7 +169,7 @@ void test_vluxseg3ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_tu(
@@ -184,7 +184,7 @@ void test_vluxseg3ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_tu(
@@ -199,7 +199,7 @@ void test_vluxseg3ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_tu(
@@ -214,7 +214,7 @@ void test_vluxseg3ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_tu(
@@ -229,7 +229,7 @@ void test_vluxseg3ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_tu(
@@ -244,7 +244,7 @@ void test_vluxseg3ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_tu(
@@ -259,7 +259,7 @@ void test_vluxseg3ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_tu(
@@ -274,7 +274,7 @@ void test_vluxseg3ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_tu(
@@ -289,7 +289,7 @@ void test_vluxseg3ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_tu(
@@ -304,7 +304,7 @@ void test_vluxseg3ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_tu(
@@ -319,7 +319,7 @@ void test_vluxseg3ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_tu(
@@ -334,7 +334,7 @@ void test_vluxseg3ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_tu(
@@ -349,7 +349,7 @@ void test_vluxseg3ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_tu(
@@ -364,7 +364,7 @@ void test_vluxseg3ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_tu(
@@ -379,7 +379,7 @@ void test_vluxseg3ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_tu(
@@ -394,7 +394,7 @@ void test_vluxseg3ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_tu(
@@ -409,7 +409,7 @@ void test_vluxseg3ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_tu(
@@ -424,7 +424,7 @@ void test_vluxseg3ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_tu(
@@ -439,7 +439,7 @@ void test_vluxseg3ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_tu(
@@ -454,7 +454,7 @@ void test_vluxseg3ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_tu(
@@ -469,7 +469,7 @@ void test_vluxseg3ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_tu(
@@ -484,7 +484,7 @@ void test_vluxseg3ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_tu(
@@ -499,7 +499,7 @@ void test_vluxseg3ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_tu(
@@ -514,7 +514,7 @@ void test_vluxseg3ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_tu(
@@ -529,7 +529,7 @@ void test_vluxseg3ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_tu(
@@ -544,7 +544,7 @@ void test_vluxseg3ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_tu(
@@ -559,7 +559,7 @@ void test_vluxseg3ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
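The hunks above finish the unmasked tail-undisturbed (`_tu`) overloads; the hunks that follow apply the same mechanical rename to the masked `_tum` variants. As a minimal sketch of what a caller looks like after this patch (the function name demo_tu is hypothetical; the types and the `__riscv_vluxseg3ei8_tu` signature are taken verbatim from the tests above):

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical caller mirroring test_vluxseg3ei8_v_u32m1_tu: only the
// __riscv_ prefix is new; overload resolution by argument types is unchanged.
void demo_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2,
             vuint32m1_t maskedoff0, vuint32m1_t maskedoff1,
             vuint32m1_t maskedoff2, const uint32_t *base,
             vuint8mf4_t bindex, size_t vl) {
  __riscv_vluxseg3ei8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2,
                         base, bindex, vl);
}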
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_tum(
@@ -574,7 +574,7 @@ void test_vluxseg3ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_tum(
@@ -589,7 +589,7 @@ void test_vluxseg3ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_tum(
@@ -604,7 +604,7 @@ void test_vluxseg3ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_tum(
@@ -619,7 +619,7 @@ void test_vluxseg3ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vluxseg3ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_tum(
@@ -649,7 +649,7 @@ void test_vluxseg3ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_tum(
@@ -664,7 +664,7 @@ void test_vluxseg3ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_tum(
@@ -679,7 +679,7 @@ void test_vluxseg3ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_tum(
@@ -694,7 +694,7 @@ void test_vluxseg3ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_tum(
@@ -709,7 +709,7 @@ void test_vluxseg3ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_tum(
@@ -724,7 +724,7 @@ void test_vluxseg3ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vluxseg3ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_tum(
@@ -754,7 +754,7 @@ void test_vluxseg3ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_tum(
@@ -769,7 +769,7 @@ void test_vluxseg3ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_tum(
@@ -784,7 +784,7 @@ void test_vluxseg3ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_tum(
@@ -799,7 +799,7 @@ void test_vluxseg3ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_tum(
@@ -814,7 +814,7 @@ void test_vluxseg3ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_tum(
@@ -829,7 +829,7 @@ void test_vluxseg3ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vluxseg3ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_tum(
@@ -859,7 +859,7 @@ void test_vluxseg3ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_tum(
@@ -874,7 +874,7 @@ void test_vluxseg3ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_tum(
@@ -889,7 +889,7 @@ void test_vluxseg3ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_tum(
@@ -904,7 +904,7 @@ void test_vluxseg3ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_tum(
@@ -919,7 +919,7 @@ void test_vluxseg3ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_tum(
@@ -934,7 +934,7 @@ void test_vluxseg3ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vluxseg3ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_tum(
@@ -964,7 +964,7 @@ void test_vluxseg3ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_tum(
@@ -979,7 +979,7 @@ void test_vluxseg3ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_tum(
@@ -994,7 +994,7 @@ void test_vluxseg3ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_tum(
@@ -1009,7 +1009,7 @@ void test_vluxseg3ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_tum(
@@ -1024,7 +1024,7 @@ void test_vluxseg3ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_tum(
@@ -1039,7 +1039,7 @@ void test_vluxseg3ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg3ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_tum(
@@ -1069,7 +1069,7 @@ void test_vluxseg3ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_tum(
@@ -1084,7 +1084,7 @@ void test_vluxseg3ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_tum(
@@ -1099,7 +1099,7 @@ void test_vluxseg3ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_tum(
@@ -1114,7 +1114,7 @@ void test_vluxseg3ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
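With the `_tum` (tail-undisturbed, masked) overloads renamed above, a caller sketch under the same assumptions (demo_tum is hypothetical; its signature mirrors test_vluxseg3ei8_v_u64m1_tum):

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical masked caller: overloading is on the vector argument types,
// while the policy suffix _tum stays explicit in the now-prefixed name.
void demo_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
              vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1,
              vuint64m1_t maskedoff2, const uint64_t *base,
              vuint8mf8_t bindex, size_t vl) {
  __riscv_vluxseg3ei8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1,
                          maskedoff2, base, bindex, vl);
}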
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_tumu(
@@ -1129,7 +1129,7 @@ void test_vluxseg3ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_tumu(
@@ -1144,7 +1144,7 @@ void test_vluxseg3ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vluxseg3ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_tumu(
@@ -1174,7 +1174,7 @@ void test_vluxseg3ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_tumu(
@@ -1189,7 +1189,7 @@ void test_vluxseg3ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_tumu(
@@ -1204,7 +1204,7 @@ void test_vluxseg3ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_tumu(
@@ -1219,7 +1219,7 @@ void test_vluxseg3ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_tumu(
@@ -1234,7 +1234,7 @@ void test_vluxseg3ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_tumu(
@@ -1249,7 +1249,7 @@ void test_vluxseg3ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_tumu(
@@ -1264,7 +1264,7 @@ void test_vluxseg3ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vluxseg3ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_tumu(
@@ -1294,7 +1294,7 @@ void test_vluxseg3ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_tumu(
@@ -1309,7 +1309,7 @@ void test_vluxseg3ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_tumu(
@@ -1324,7 +1324,7 @@ void test_vluxseg3ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_tumu(
@@ -1339,7 +1339,7 @@ void test_vluxseg3ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vluxseg3ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg3ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_tumu(
@@ -1384,7 +1384,7 @@ void test_vluxseg3ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_tumu(
@@ -1399,7 +1399,7 @@ void test_vluxseg3ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_tumu(
@@ -1414,7 +1414,7 @@ void test_vluxseg3ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg3ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_tumu(
@@ -1444,7 +1444,7 @@ void test_vluxseg3ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_tumu(
@@ -1459,7 +1459,7 @@ void test_vluxseg3ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_tumu(
@@ -1474,7 +1474,7 @@ void test_vluxseg3ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_tumu(
@@ -1489,7 +1489,7 @@ void test_vluxseg3ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_tumu(
@@ -1504,7 +1504,7 @@ void test_vluxseg3ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_tumu(
@@ -1519,7 +1519,7 @@ void test_vluxseg3ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_tumu(
@@ -1534,7 +1534,7 @@ void test_vluxseg3ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_tumu(
@@ -1549,7 +1549,7 @@ void test_vluxseg3ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_tumu(
@@ -1564,7 +1564,7 @@ void test_vluxseg3ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg3ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_tumu(
@@ -1594,7 +1594,7 @@ void test_vluxseg3ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_tumu(
@@ -1609,7 +1609,7 @@ void test_vluxseg3ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_tumu(
@@ -1624,7 +1624,7 @@ void test_vluxseg3ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_tumu(
@@ -1639,7 +1639,7 @@ void test_vluxseg3ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_tumu(
@@ -1654,7 +1654,7 @@ void test_vluxseg3ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_tumu(
@@ -1669,7 +1669,7 @@ void test_vluxseg3ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
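The `_tumu` (tail-undisturbed, mask-undisturbed) group above and the `_mu` group below complete the four policy suffixes covered by this file. One more hedged sketch (demo_tumu is hypothetical; its signature mirrors test_vluxseg3ei8_v_u32mf2_tumu):

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical caller of the mask-undisturbed form; as in all of these hunks,
// the argument list is untouched and only the intrinsic gains the __riscv_ prefix.
void demo_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2,
               vbool64_t mask, vuint32mf2_t maskedoff0,
               vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2,
               const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
  __riscv_vluxseg3ei8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1,
                           maskedoff2, base, bindex, vl);
}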
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_mu(
@@ -1684,7 +1684,7 @@ void test_vluxseg3ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_mu(
@@ -1699,7 +1699,7 @@ void test_vluxseg3ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_mu(
@@ -1714,7 +1714,7 @@ void test_vluxseg3ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_mu(
@@ -1729,7 +1729,7 @@ void test_vluxseg3ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_mu(
@@ -1744,7 +1744,7 @@ void test_vluxseg3ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_mu(
@@ -1759,7 +1759,7 @@ void test_vluxseg3ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_mu(
@@ -1774,7 +1774,7 @@ void test_vluxseg3ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_mu(
@@ -1789,7 +1789,7 @@ void test_vluxseg3ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_mu(
@@ -1804,7 +1804,7 @@ void test_vluxseg3ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_mu(
@@ -1819,7 +1819,7 @@ void test_vluxseg3ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_mu(
@@ -1834,7 +1834,7 @@ void test_vluxseg3ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_mu(
@@ -1849,7 +1849,7 @@ void test_vluxseg3ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_mu(
@@ -1864,7 +1864,7 @@ void test_vluxseg3ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_mu(
@@ -1879,7 +1879,7 @@ void test_vluxseg3ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_mu(
@@ -1894,7 +1894,7 @@ void test_vluxseg3ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_mu(
@@ -1909,7 +1909,7 @@ void test_vluxseg3ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_mu(
@@ -1924,7 +1924,7 @@ void test_vluxseg3ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_mu(
@@ -1939,7 +1939,7 @@ void test_vluxseg3ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_mu(
@@ -1954,7 +1954,7 @@ void test_vluxseg3ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_mu(
@@ -1969,7 +1969,7 @@ void test_vluxseg3ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_mu(
@@ -1984,7 +1984,7 @@ void test_vluxseg3ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_mu(
@@ -1999,7 +1999,7 @@ void test_vluxseg3ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_mu(
@@ -2014,7 +2014,7 @@ void test_vluxseg3ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_mu(
@@ -2029,7 +2029,7 @@ void test_vluxseg3ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_mu(
@@ -2044,7 +2044,7 @@ void test_vluxseg3ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_mu(
@@ -2059,7 +2059,7 @@ void test_vluxseg3ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_mu(
@@ -2074,7 +2074,7 @@ void test_vluxseg3ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_mu(
@@ -2089,7 +2089,7 @@ void test_vluxseg3ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg3ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_mu(
@@ -2119,7 +2119,7 @@ void test_vluxseg3ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_mu(
@@ -2134,7 +2134,7 @@ void test_vluxseg3ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_mu(
@@ -2149,7 +2149,7 @@ void test_vluxseg3ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_mu(
@@ -2164,7 +2164,7 @@ void test_vluxseg3ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_mu(
@@ -2179,7 +2179,7 @@ void test_vluxseg3ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_mu(
@@ -2194,7 +2194,7 @@ void test_vluxseg3ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_mu(
@@ -2209,7 +2209,7 @@ void test_vluxseg3ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_mu(
@@ -2224,6 +2224,6 @@ void test_vluxseg3ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg3ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+ return __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
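(For readers skimming the hunks: a minimal sketch of what this rename means at a call site, using the masked `_mu` overloaded form exercised in the tests above. The caller name `gather3` is hypothetical; the intrinsic signature and argument order are copied verbatim from the test file, and only the name changes.)

#include <riscv_vector.h>

// Hypothetical caller: indexed load of three interleaved int8 segments
// under a mask, with the mask-undisturbed (_mu) policy. The argument
// list is unchanged by this patch; the intrinsic merely gains the
// __riscv_ prefix required by the riscv-c-api-doc naming guideline.
void gather3(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask,
             vint8m1_t off0, vint8m1_t off1, vint8m1_t off2,
             const int8_t *base, vuint8m1_t bindex, size_t vl) {
  // Before this patch: vluxseg3ei8_mu(v0, v1, v2, mask, off0, off1, off2, base, bindex, vl);
  __riscv_vluxseg3ei8_mu(v0, v1, v2, mask, off0, off1, off2, base, bindex, vl);
}
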
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c
index 902dc4578385..1717a4e00891 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vluxseg4ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vluxseg4ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vluxseg4ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_tu(
@@ -89,7 +89,7 @@ void test_vluxseg4ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_tu(
@@ -106,7 +106,7 @@ void test_vluxseg4ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_tu(
@@ -123,7 +123,7 @@ void test_vluxseg4ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_tu(
@@ -140,7 +140,7 @@ void test_vluxseg4ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_tu(
@@ -157,7 +157,7 @@ void test_vluxseg4ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_tu(
@@ -174,7 +174,7 @@ void test_vluxseg4ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_tu(
@@ -191,7 +191,7 @@ void test_vluxseg4ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_tu(
@@ -208,7 +208,7 @@ void test_vluxseg4ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_tu(
@@ -225,7 +225,7 @@ void test_vluxseg4ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_tu(
@@ -242,7 +242,7 @@ void test_vluxseg4ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_tu(
@@ -259,7 +259,7 @@ void test_vluxseg4ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_tu(
@@ -276,7 +276,7 @@ void test_vluxseg4ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_tu(
@@ -293,7 +293,7 @@ void test_vluxseg4ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_tu(
@@ -310,7 +310,7 @@ void test_vluxseg4ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_tu(
@@ -327,7 +327,7 @@ void test_vluxseg4ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_tu(
@@ -344,7 +344,7 @@ void test_vluxseg4ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_tu(
@@ -361,7 +361,7 @@ void test_vluxseg4ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_tu(
@@ -378,7 +378,7 @@ void test_vluxseg4ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_tu(
@@ -395,7 +395,7 @@ void test_vluxseg4ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_tu(
@@ -412,7 +412,7 @@ void test_vluxseg4ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_tu(
@@ -429,7 +429,7 @@ void test_vluxseg4ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_tu(
@@ -446,7 +446,7 @@ void test_vluxseg4ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_tu(
@@ -463,7 +463,7 @@ void test_vluxseg4ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_tu(
@@ -480,7 +480,7 @@ void test_vluxseg4ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_tu(
@@ -497,7 +497,7 @@ void test_vluxseg4ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_tu(
@@ -514,7 +514,7 @@ void test_vluxseg4ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_tu(
@@ -531,7 +531,7 @@ void test_vluxseg4ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_tu(
@@ -548,7 +548,7 @@ void test_vluxseg4ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_tu(
@@ -565,7 +565,7 @@ void test_vluxseg4ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_tu(
@@ -582,7 +582,7 @@ void test_vluxseg4ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_tu(
@@ -599,7 +599,7 @@ void test_vluxseg4ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_tu(
@@ -616,7 +616,7 @@ void test_vluxseg4ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_tu(
@@ -633,7 +633,7 @@ void test_vluxseg4ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_tum(
@@ -650,7 +650,7 @@ void test_vluxseg4ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_tum(
@@ -667,7 +667,7 @@ void test_vluxseg4ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_tum(
@@ -684,7 +684,7 @@ void test_vluxseg4ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_tum(
@@ -701,7 +701,7 @@ void test_vluxseg4ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_tum(
@@ -718,7 +718,7 @@ void test_vluxseg4ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_tum(
@@ -735,7 +735,7 @@ void test_vluxseg4ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_tum(
@@ -752,7 +752,7 @@ void test_vluxseg4ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_tum(
@@ -769,7 +769,7 @@ void test_vluxseg4ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_tum(
@@ -786,7 +786,7 @@ void test_vluxseg4ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_tum(
@@ -803,7 +803,7 @@ void test_vluxseg4ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_tum(
@@ -820,7 +820,7 @@ void test_vluxseg4ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_tum(
@@ -837,7 +837,7 @@ void test_vluxseg4ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_tum(
@@ -854,7 +854,7 @@ void test_vluxseg4ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_tum(
@@ -871,7 +871,7 @@ void test_vluxseg4ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_tum(
@@ -888,7 +888,7 @@ void test_vluxseg4ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_tum(
@@ -905,7 +905,7 @@ void test_vluxseg4ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_tum(
@@ -922,7 +922,7 @@ void test_vluxseg4ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_tum(
@@ -939,7 +939,7 @@ void test_vluxseg4ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_tum(
@@ -956,7 +956,7 @@ void test_vluxseg4ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_tum(
@@ -973,7 +973,7 @@ void test_vluxseg4ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_tum(
@@ -990,7 +990,7 @@ void test_vluxseg4ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_tum(
@@ -1007,7 +1007,7 @@ void test_vluxseg4ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_tum(
@@ -1024,7 +1024,7 @@ void test_vluxseg4ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_tum(
@@ -1041,7 +1041,7 @@ void test_vluxseg4ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_tum(
@@ -1058,7 +1058,7 @@ void test_vluxseg4ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_tum(
@@ -1075,7 +1075,7 @@ void test_vluxseg4ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_tum(
@@ -1092,7 +1092,7 @@ void test_vluxseg4ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_tum(
@@ -1109,7 +1109,7 @@ void test_vluxseg4ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_tum(
@@ -1126,7 +1126,7 @@ void test_vluxseg4ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_tum(
@@ -1143,7 +1143,7 @@ void test_vluxseg4ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_tum(
@@ -1160,7 +1160,7 @@ void test_vluxseg4ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_tum(
@@ -1177,7 +1177,7 @@ void test_vluxseg4ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_tum(
@@ -1194,7 +1194,7 @@ void test_vluxseg4ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_tum(
@@ -1211,7 +1211,7 @@ void test_vluxseg4ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_tum(
@@ -1228,7 +1228,7 @@ void test_vluxseg4ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_tum(
@@ -1245,7 +1245,7 @@ void test_vluxseg4ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_tum(
@@ -1262,7 +1262,7 @@ void test_vluxseg4ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vluxseg4ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_tumu(
@@ -1296,7 +1296,7 @@ void test_vluxseg4ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_tumu(
@@ -1313,7 +1313,7 @@ void test_vluxseg4ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_tumu(
@@ -1330,7 +1330,7 @@ void test_vluxseg4ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_tumu(
@@ -1347,7 +1347,7 @@ void test_vluxseg4ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_tumu(
@@ -1364,7 +1364,7 @@ void test_vluxseg4ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_tumu(
@@ -1381,7 +1381,7 @@ void test_vluxseg4ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_tumu(
@@ -1398,7 +1398,7 @@ void test_vluxseg4ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_tumu(
@@ -1415,7 +1415,7 @@ void test_vluxseg4ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_tumu(
@@ -1432,7 +1432,7 @@ void test_vluxseg4ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_tumu(
@@ -1449,7 +1449,7 @@ void test_vluxseg4ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_tumu(
@@ -1466,7 +1466,7 @@ void test_vluxseg4ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_tumu(
@@ -1483,7 +1483,7 @@ void test_vluxseg4ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_tumu(
@@ -1500,7 +1500,7 @@ void test_vluxseg4ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_tumu(
@@ -1517,7 +1517,7 @@ void test_vluxseg4ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_tumu(
@@ -1534,7 +1534,7 @@ void test_vluxseg4ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vluxseg4ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_tumu(
@@ -1568,7 +1568,7 @@ void test_vluxseg4ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_tumu(
@@ -1585,7 +1585,7 @@ void test_vluxseg4ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_tumu(
@@ -1602,7 +1602,7 @@ void test_vluxseg4ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_tumu(
@@ -1619,7 +1619,7 @@ void test_vluxseg4ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_tumu(
@@ -1636,7 +1636,7 @@ void test_vluxseg4ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_tumu(
@@ -1653,7 +1653,7 @@ void test_vluxseg4ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_tumu(
@@ -1670,7 +1670,7 @@ void test_vluxseg4ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_tumu(
@@ -1687,7 +1687,7 @@ void test_vluxseg4ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_tumu(
@@ -1704,7 +1704,7 @@ void test_vluxseg4ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_tumu(
@@ -1721,7 +1721,7 @@ void test_vluxseg4ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_tumu(
@@ -1738,7 +1738,7 @@ void test_vluxseg4ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_tumu(
@@ -1755,7 +1755,7 @@ void test_vluxseg4ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_tumu(
@@ -1772,7 +1772,7 @@ void test_vluxseg4ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_tumu(
@@ -1789,7 +1789,7 @@ void test_vluxseg4ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_tumu(
@@ -1806,7 +1806,7 @@ void test_vluxseg4ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_tumu(
@@ -1823,7 +1823,7 @@ void test_vluxseg4ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_tumu(
@@ -1840,7 +1840,7 @@ void test_vluxseg4ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_tumu(
@@ -1857,7 +1857,7 @@ void test_vluxseg4ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_tumu(
@@ -1874,7 +1874,7 @@ void test_vluxseg4ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_tumu(
@@ -1891,7 +1891,7 @@ void test_vluxseg4ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_mu(
@@ -1908,7 +1908,7 @@ void test_vluxseg4ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_mu(
@@ -1925,7 +1925,7 @@ void test_vluxseg4ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_mu(
@@ -1942,7 +1942,7 @@ void test_vluxseg4ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_mu(
@@ -1959,7 +1959,7 @@ void test_vluxseg4ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_mu(
@@ -1976,7 +1976,7 @@ void test_vluxseg4ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_mu(
@@ -1993,7 +1993,7 @@ void test_vluxseg4ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_mu(
@@ -2010,7 +2010,7 @@ void test_vluxseg4ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_mu(
@@ -2027,7 +2027,7 @@ void test_vluxseg4ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_mu(
@@ -2044,7 +2044,7 @@ void test_vluxseg4ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_mu(
@@ -2061,7 +2061,7 @@ void test_vluxseg4ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_mu(
@@ -2078,7 +2078,7 @@ void test_vluxseg4ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_mu(
@@ -2095,7 +2095,7 @@ void test_vluxseg4ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_mu(
@@ -2112,7 +2112,7 @@ void test_vluxseg4ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_mu(
@@ -2129,7 +2129,7 @@ void test_vluxseg4ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_mu(
@@ -2146,7 +2146,7 @@ void test_vluxseg4ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_mu(
@@ -2163,7 +2163,7 @@ void test_vluxseg4ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_mu(
@@ -2180,7 +2180,7 @@ void test_vluxseg4ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_mu(
@@ -2197,7 +2197,7 @@ void test_vluxseg4ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vluxseg4ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_mu(
@@ -2231,7 +2231,7 @@ void test_vluxseg4ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_mu(
@@ -2248,7 +2248,7 @@ void test_vluxseg4ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_mu(
@@ -2265,7 +2265,7 @@ void test_vluxseg4ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_mu(
@@ -2282,7 +2282,7 @@ void test_vluxseg4ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_mu(
@@ -2299,7 +2299,7 @@ void test_vluxseg4ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_mu(
@@ -2316,7 +2316,7 @@ void test_vluxseg4ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_mu(
@@ -2333,7 +2333,7 @@ void test_vluxseg4ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_mu(
@@ -2350,7 +2350,7 @@ void test_vluxseg4ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_mu(
@@ -2367,7 +2367,7 @@ void test_vluxseg4ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_mu(
@@ -2384,7 +2384,7 @@ void test_vluxseg4ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_mu(
@@ -2401,7 +2401,7 @@ void test_vluxseg4ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_mu(
@@ -2418,7 +2418,7 @@ void test_vluxseg4ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_mu(
@@ -2435,7 +2435,7 @@ void test_vluxseg4ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_mu(
@@ -2452,7 +2452,7 @@ void test_vluxseg4ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_mu(
@@ -2469,7 +2469,7 @@ void test_vluxseg4ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_mu(
@@ -2486,7 +2486,7 @@ void test_vluxseg4ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_mu(
@@ -2503,7 +2503,7 @@ void test_vluxseg4ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_mu(
@@ -2520,6 +2520,6 @@ void test_vluxseg4ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
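Every hunk in these files performs the same mechanical substitution: the overloaded policy intrinsic gains the `__riscv_` prefix while its argument list is unchanged. A minimal caller sketch under the new naming, assuming <riscv_vector.h> and a V-extension target (the helper name gather_seg4_mu and the off0..off3 operand names are hypothetical, not from the patch):

#include <riscv_vector.h>

// Mask-undisturbed (_mu) indexed segment load: lanes where mask is clear keep
// the values of the corresponding off0..off3 operands, while active lanes load
// four fields per element from base, indexed by bindex.
void gather_seg4_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3,
                    vbool8_t mask, vuint16m2_t off0, vuint16m2_t off1,
                    vuint16m2_t off2, vuint16m2_t off3,
                    const uint16_t *base, vuint16m2_t bindex, size_t vl) {
  __riscv_vluxseg4ei16_mu(v0, v1, v2, v3, mask, off0, off1, off2, off3,
                          base, bindex, vl);
}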
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c
index fd392c7c6c50..d221fbdd95bc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vluxseg4ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vluxseg4ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vluxseg4ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_tu(
@@ -89,7 +89,7 @@ void test_vluxseg4ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_tu(
@@ -106,7 +106,7 @@ void test_vluxseg4ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_tu(
@@ -123,7 +123,7 @@ void test_vluxseg4ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_tu(
@@ -140,7 +140,7 @@ void test_vluxseg4ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_tu(
@@ -157,7 +157,7 @@ void test_vluxseg4ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_tu(
@@ -174,7 +174,7 @@ void test_vluxseg4ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_tu(
@@ -191,7 +191,7 @@ void test_vluxseg4ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_tu(
@@ -208,7 +208,7 @@ void test_vluxseg4ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_tu(
@@ -225,7 +225,7 @@ void test_vluxseg4ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_tu(
@@ -242,7 +242,7 @@ void test_vluxseg4ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_tu(
@@ -259,7 +259,7 @@ void test_vluxseg4ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_tu(
@@ -276,7 +276,7 @@ void test_vluxseg4ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_tu(
@@ -293,7 +293,7 @@ void test_vluxseg4ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_tu(
@@ -310,7 +310,7 @@ void test_vluxseg4ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_tu(
@@ -327,7 +327,7 @@ void test_vluxseg4ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_tu(
@@ -344,7 +344,7 @@ void test_vluxseg4ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_tu(
@@ -361,7 +361,7 @@ void test_vluxseg4ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_tu(
@@ -378,7 +378,7 @@ void test_vluxseg4ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_tu(
@@ -395,7 +395,7 @@ void test_vluxseg4ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_tu(
@@ -412,7 +412,7 @@ void test_vluxseg4ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_tu(
@@ -429,7 +429,7 @@ void test_vluxseg4ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_tu(
@@ -446,7 +446,7 @@ void test_vluxseg4ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_tu(
@@ -463,7 +463,7 @@ void test_vluxseg4ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_tu(
@@ -480,7 +480,7 @@ void test_vluxseg4ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_tu(
@@ -497,7 +497,7 @@ void test_vluxseg4ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_tu(
@@ -514,7 +514,7 @@ void test_vluxseg4ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_tu(
@@ -531,7 +531,7 @@ void test_vluxseg4ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_tu(
@@ -548,7 +548,7 @@ void test_vluxseg4ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_tu(
@@ -565,7 +565,7 @@ void test_vluxseg4ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_tu(
@@ -582,7 +582,7 @@ void test_vluxseg4ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_tu(
@@ -599,7 +599,7 @@ void test_vluxseg4ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_tu(
@@ -616,7 +616,7 @@ void test_vluxseg4ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_tu(
@@ -633,7 +633,7 @@ void test_vluxseg4ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
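From here the file's hunks move from the unmasked tail-undisturbed (_tu) variants to the masked policy variants (_tum, _tumu, _mu). A hypothetical side-by-side sketch, reusing operand names in the style of the surrounding tests: the _tu form takes no mask, the masked forms insert a vbool operand ahead of the maskedoff values, and both spellings now carry the `__riscv_` prefix.

// _tu: unmasked, tail-undisturbed; no mask operand.
__riscv_vluxseg4ei32_tu (v0, v1, v2, v3,       off0, off1, off2, off3, base, bindex, vl);
// _tum: masked, tail-undisturbed; the mask precedes the maskedoff operands.
__riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, off0, off1, off2, off3, base, bindex, vl);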
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_tum(
@@ -650,7 +650,7 @@ void test_vluxseg4ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_tum(
@@ -667,7 +667,7 @@ void test_vluxseg4ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_tum(
@@ -684,7 +684,7 @@ void test_vluxseg4ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_tum(
@@ -701,7 +701,7 @@ void test_vluxseg4ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_tum(
@@ -718,7 +718,7 @@ void test_vluxseg4ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_tum(
@@ -735,7 +735,7 @@ void test_vluxseg4ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_tum(
@@ -752,7 +752,7 @@ void test_vluxseg4ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_tum(
@@ -769,7 +769,7 @@ void test_vluxseg4ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_tum(
@@ -786,7 +786,7 @@ void test_vluxseg4ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_tum(
@@ -803,7 +803,7 @@ void test_vluxseg4ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_tum(
@@ -820,7 +820,7 @@ void test_vluxseg4ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_tum(
@@ -837,7 +837,7 @@ void test_vluxseg4ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_tum(
@@ -854,7 +854,7 @@ void test_vluxseg4ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_tum(
@@ -871,7 +871,7 @@ void test_vluxseg4ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_tum(
@@ -888,7 +888,7 @@ void test_vluxseg4ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_tum(
@@ -905,7 +905,7 @@ void test_vluxseg4ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_tum(
@@ -922,7 +922,7 @@ void test_vluxseg4ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_tum(
@@ -939,7 +939,7 @@ void test_vluxseg4ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_tum(
@@ -956,7 +956,7 @@ void test_vluxseg4ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_tum(
@@ -973,7 +973,7 @@ void test_vluxseg4ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_tum(
@@ -990,7 +990,7 @@ void test_vluxseg4ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_tum(
@@ -1007,7 +1007,7 @@ void test_vluxseg4ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_tum(
@@ -1024,7 +1024,7 @@ void test_vluxseg4ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_tum(
@@ -1041,7 +1041,7 @@ void test_vluxseg4ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_tum(
@@ -1058,7 +1058,7 @@ void test_vluxseg4ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_tum(
@@ -1075,7 +1075,7 @@ void test_vluxseg4ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_tum(
@@ -1092,7 +1092,7 @@ void test_vluxseg4ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_tum(
@@ -1109,7 +1109,7 @@ void test_vluxseg4ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_tum(
@@ -1126,7 +1126,7 @@ void test_vluxseg4ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_tum(
@@ -1143,7 +1143,7 @@ void test_vluxseg4ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_tum(
@@ -1160,7 +1160,7 @@ void test_vluxseg4ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_tum(
@@ -1177,7 +1177,7 @@ void test_vluxseg4ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_tum(
@@ -1194,7 +1194,7 @@ void test_vluxseg4ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_tum(
@@ -1211,7 +1211,7 @@ void test_vluxseg4ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_tum(
@@ -1228,7 +1228,7 @@ void test_vluxseg4ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_tum(
@@ -1245,7 +1245,7 @@ void test_vluxseg4ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_tum(
@@ -1262,7 +1262,7 @@ void test_vluxseg4ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vluxseg4ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_tumu(
@@ -1296,7 +1296,7 @@ void test_vluxseg4ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_tumu(
@@ -1313,7 +1313,7 @@ void test_vluxseg4ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_tumu(
@@ -1330,7 +1330,7 @@ void test_vluxseg4ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_tumu(
@@ -1347,7 +1347,7 @@ void test_vluxseg4ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_tumu(
@@ -1364,7 +1364,7 @@ void test_vluxseg4ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_tumu(
@@ -1381,7 +1381,7 @@ void test_vluxseg4ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_tumu(
@@ -1398,7 +1398,7 @@ void test_vluxseg4ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_tumu(
@@ -1415,7 +1415,7 @@ void test_vluxseg4ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_tumu(
@@ -1432,7 +1432,7 @@ void test_vluxseg4ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_tumu(
@@ -1449,7 +1449,7 @@ void test_vluxseg4ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_tumu(
@@ -1466,7 +1466,7 @@ void test_vluxseg4ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_tumu(
@@ -1483,7 +1483,7 @@ void test_vluxseg4ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_tumu(
@@ -1500,7 +1500,7 @@ void test_vluxseg4ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_tumu(
@@ -1517,7 +1517,7 @@ void test_vluxseg4ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_tumu(
@@ -1534,7 +1534,7 @@ void test_vluxseg4ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vluxseg4ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_tumu(
@@ -1568,7 +1568,7 @@ void test_vluxseg4ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_tumu(
@@ -1585,7 +1585,7 @@ void test_vluxseg4ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_tumu(
@@ -1602,7 +1602,7 @@ void test_vluxseg4ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_tumu(
@@ -1619,7 +1619,7 @@ void test_vluxseg4ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_tumu(
@@ -1636,7 +1636,7 @@ void test_vluxseg4ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_tumu(
@@ -1653,7 +1653,7 @@ void test_vluxseg4ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_tumu(
@@ -1670,7 +1670,7 @@ void test_vluxseg4ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_tumu(
@@ -1687,7 +1687,7 @@ void test_vluxseg4ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_tumu(
@@ -1704,7 +1704,7 @@ void test_vluxseg4ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_tumu(
@@ -1721,7 +1721,7 @@ void test_vluxseg4ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_tumu(
@@ -1738,7 +1738,7 @@ void test_vluxseg4ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_tumu(
@@ -1755,7 +1755,7 @@ void test_vluxseg4ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_tumu(
@@ -1772,7 +1772,7 @@ void test_vluxseg4ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_tumu(
@@ -1789,7 +1789,7 @@ void test_vluxseg4ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_tumu(
@@ -1806,7 +1806,7 @@ void test_vluxseg4ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_tumu(
@@ -1823,7 +1823,7 @@ void test_vluxseg4ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_tumu(
@@ -1840,7 +1840,7 @@ void test_vluxseg4ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_tumu(
@@ -1857,7 +1857,7 @@ void test_vluxseg4ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_tumu(
@@ -1874,7 +1874,7 @@ void test_vluxseg4ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_tumu(
@@ -1891,7 +1891,7 @@ void test_vluxseg4ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_mu(
@@ -1908,7 +1908,7 @@ void test_vluxseg4ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_mu(
@@ -1925,7 +1925,7 @@ void test_vluxseg4ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_mu(
@@ -1942,7 +1942,7 @@ void test_vluxseg4ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_mu(
@@ -1959,7 +1959,7 @@ void test_vluxseg4ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_mu(
@@ -1976,7 +1976,7 @@ void test_vluxseg4ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_mu(
@@ -1993,7 +1993,7 @@ void test_vluxseg4ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_mu(
@@ -2010,7 +2010,7 @@ void test_vluxseg4ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_mu(
@@ -2027,7 +2027,7 @@ void test_vluxseg4ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_mu(
@@ -2044,7 +2044,7 @@ void test_vluxseg4ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_mu(
@@ -2061,7 +2061,7 @@ void test_vluxseg4ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_mu(
@@ -2078,7 +2078,7 @@ void test_vluxseg4ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_mu(
@@ -2095,7 +2095,7 @@ void test_vluxseg4ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_mu(
@@ -2112,7 +2112,7 @@ void test_vluxseg4ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_mu(
@@ -2129,7 +2129,7 @@ void test_vluxseg4ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_mu(
@@ -2146,7 +2146,7 @@ void test_vluxseg4ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_mu(
@@ -2163,7 +2163,7 @@ void test_vluxseg4ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_mu(
@@ -2180,7 +2180,7 @@ void test_vluxseg4ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_mu(
@@ -2197,7 +2197,7 @@ void test_vluxseg4ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vluxseg4ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_mu(
@@ -2231,7 +2231,7 @@ void test_vluxseg4ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_mu(
@@ -2248,7 +2248,7 @@ void test_vluxseg4ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_mu(
@@ -2265,7 +2265,7 @@ void test_vluxseg4ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_mu(
@@ -2282,7 +2282,7 @@ void test_vluxseg4ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_mu(
@@ -2299,7 +2299,7 @@ void test_vluxseg4ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_mu(
@@ -2316,7 +2316,7 @@ void test_vluxseg4ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_mu(
@@ -2333,7 +2333,7 @@ void test_vluxseg4ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_mu(
@@ -2350,7 +2350,7 @@ void test_vluxseg4ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_mu(
@@ -2367,7 +2367,7 @@ void test_vluxseg4ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_mu(
@@ -2384,7 +2384,7 @@ void test_vluxseg4ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_mu(
@@ -2401,7 +2401,7 @@ void test_vluxseg4ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_mu(
@@ -2418,7 +2418,7 @@ void test_vluxseg4ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_mu(
@@ -2435,7 +2435,7 @@ void test_vluxseg4ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_mu(
@@ -2452,7 +2452,7 @@ void test_vluxseg4ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_mu(
@@ -2469,7 +2469,7 @@ void test_vluxseg4ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_mu(
@@ -2486,7 +2486,7 @@ void test_vluxseg4ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_mu(
@@ -2503,7 +2503,7 @@ void test_vluxseg4ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_mu(
@@ -2520,6 +2520,6 @@ void test_vluxseg4ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c
index 5edcdde3bc05..31d61ae5af4a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vluxseg4ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vluxseg4ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vluxseg4ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_tu(
@@ -89,7 +89,7 @@ void test_vluxseg4ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_tu(
@@ -106,7 +106,7 @@ void test_vluxseg4ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_tu(
@@ -123,7 +123,7 @@ void test_vluxseg4ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_tu(
@@ -140,7 +140,7 @@ void test_vluxseg4ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_tu(
@@ -157,7 +157,7 @@ void test_vluxseg4ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_tu(
@@ -174,7 +174,7 @@ void test_vluxseg4ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_tu(
@@ -191,7 +191,7 @@ void test_vluxseg4ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_tu(
@@ -208,7 +208,7 @@ void test_vluxseg4ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_tu(
@@ -225,7 +225,7 @@ void test_vluxseg4ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_tu(
@@ -242,7 +242,7 @@ void test_vluxseg4ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_tu(
@@ -259,7 +259,7 @@ void test_vluxseg4ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_tu(
@@ -276,7 +276,7 @@ void test_vluxseg4ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_tu(
@@ -293,7 +293,7 @@ void test_vluxseg4ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_tu(
@@ -310,7 +310,7 @@ void test_vluxseg4ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_tu(
@@ -327,7 +327,7 @@ void test_vluxseg4ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_tu(
@@ -344,7 +344,7 @@ void test_vluxseg4ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_tu(
@@ -361,7 +361,7 @@ void test_vluxseg4ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_tu(
@@ -378,7 +378,7 @@ void test_vluxseg4ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vluxseg4ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_tu(
@@ -412,7 +412,7 @@ void test_vluxseg4ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_tu(
@@ -429,7 +429,7 @@ void test_vluxseg4ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_tu(
@@ -446,7 +446,7 @@ void test_vluxseg4ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_tu(
@@ -463,7 +463,7 @@ void test_vluxseg4ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_tu(
@@ -480,7 +480,7 @@ void test_vluxseg4ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_tu(
@@ -497,7 +497,7 @@ void test_vluxseg4ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_tu(
@@ -514,7 +514,7 @@ void test_vluxseg4ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_tu(
@@ -531,7 +531,7 @@ void test_vluxseg4ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_tu(
@@ -548,7 +548,7 @@ void test_vluxseg4ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_tu(
@@ -565,7 +565,7 @@ void test_vluxseg4ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_tu(
@@ -582,7 +582,7 @@ void test_vluxseg4ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_tu(
@@ -599,7 +599,7 @@ void test_vluxseg4ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
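// The tail-undisturbed (_tu) overloads above all follow one pure-rename pattern:
// the argument lists are unchanged and only the __riscv_ prefix is new. A minimal
// caller-side sketch of that migration (illustrative only, not part of the generated
// tests; assumes <riscv_vector.h> is included, as these tests do, and uses a
// hypothetical helper name; the types mirror the i8mf2 case above):
void migrate_vluxseg4ei64_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t off0, vint8mf2_t off1, vint8mf2_t off2, vint8mf2_t off3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
  // Previously spelled vluxseg4ei64_tu(...); every operand passes through untouched.
  __riscv_vluxseg4ei64_tu(v0, v1, v2, v3, off0, off1, off2, off3, base, bindex, vl);
}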
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_tum(
@@ -616,7 +616,7 @@ void test_vluxseg4ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_tum(
@@ -633,7 +633,7 @@ void test_vluxseg4ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_tum(
@@ -650,7 +650,7 @@ void test_vluxseg4ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_tum(
@@ -667,7 +667,7 @@ void test_vluxseg4ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_tum(
@@ -684,7 +684,7 @@ void test_vluxseg4ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_tum(
@@ -701,7 +701,7 @@ void test_vluxseg4ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_tum(
@@ -718,7 +718,7 @@ void test_vluxseg4ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_tum(
@@ -735,7 +735,7 @@ void test_vluxseg4ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_tum(
@@ -752,7 +752,7 @@ void test_vluxseg4ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_tum(
@@ -769,7 +769,7 @@ void test_vluxseg4ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vluxseg4ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_tum(
@@ -803,7 +803,7 @@ void test_vluxseg4ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_tum(
@@ -820,7 +820,7 @@ void test_vluxseg4ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_tum(
@@ -837,7 +837,7 @@ void test_vluxseg4ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_tum(
@@ -854,7 +854,7 @@ void test_vluxseg4ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_tum(
@@ -871,7 +871,7 @@ void test_vluxseg4ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_tum(
@@ -888,7 +888,7 @@ void test_vluxseg4ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_tum(
@@ -905,7 +905,7 @@ void test_vluxseg4ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_tum(
@@ -922,7 +922,7 @@ void test_vluxseg4ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_tum(
@@ -939,7 +939,7 @@ void test_vluxseg4ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_tum(
@@ -956,7 +956,7 @@ void test_vluxseg4ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_tum(
@@ -973,7 +973,7 @@ void test_vluxseg4ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_tum(
@@ -990,7 +990,7 @@ void test_vluxseg4ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_tum(
@@ -1007,7 +1007,7 @@ void test_vluxseg4ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_tum(
@@ -1024,7 +1024,7 @@ void test_vluxseg4ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_tum(
@@ -1041,7 +1041,7 @@ void test_vluxseg4ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_tum(
@@ -1058,7 +1058,7 @@ void test_vluxseg4ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_tum(
@@ -1075,7 +1075,7 @@ void test_vluxseg4ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_tum(
@@ -1092,7 +1092,7 @@ void test_vluxseg4ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_tum(
@@ -1109,7 +1109,7 @@ void test_vluxseg4ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_tum(
@@ -1126,7 +1126,7 @@ void test_vluxseg4ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_tum(
@@ -1143,7 +1143,7 @@ void test_vluxseg4ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_tum(
@@ -1160,7 +1160,7 @@ void test_vluxseg4ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_tum(
@@ -1177,7 +1177,7 @@ void test_vluxseg4ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_tum(
@@ -1194,7 +1194,7 @@ void test_vluxseg4ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
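// The masked tail-undisturbed (_tum) overloads above rename the same way: the vbool
// mask keeps its position ahead of the maskedoff operands, and no argument changes.
// Another illustrative, hypothetical helper (not part of the generated tests; types
// mirror the i8mf8 _tum case above):
void migrate_vluxseg4ei64_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t off0, vint8mf8_t off1, vint8mf8_t off2, vint8mf8_t off3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
  // Previously spelled vluxseg4ei64_tum(...); only the __riscv_ prefix is added.
  __riscv_vluxseg4ei64_tum(v0, v1, v2, v3, mask, off0, off1, off2, off3, base, bindex, vl);
}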
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_tumu(
@@ -1211,7 +1211,7 @@ void test_vluxseg4ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_tumu(
@@ -1228,7 +1228,7 @@ void test_vluxseg4ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_tumu(
@@ -1245,7 +1245,7 @@ void test_vluxseg4ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_tumu(
@@ -1262,7 +1262,7 @@ void test_vluxseg4ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_tumu(
@@ -1279,7 +1279,7 @@ void test_vluxseg4ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vluxseg4ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_tumu(
@@ -1313,7 +1313,7 @@ void test_vluxseg4ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_tumu(
@@ -1330,7 +1330,7 @@ void test_vluxseg4ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_tumu(
@@ -1347,7 +1347,7 @@ void test_vluxseg4ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_tumu(
@@ -1364,7 +1364,7 @@ void test_vluxseg4ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_tumu(
@@ -1381,7 +1381,7 @@ void test_vluxseg4ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_tumu(
@@ -1398,7 +1398,7 @@ void test_vluxseg4ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_tumu(
@@ -1415,7 +1415,7 @@ void test_vluxseg4ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_tumu(
@@ -1432,7 +1432,7 @@ void test_vluxseg4ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_tumu(
@@ -1449,7 +1449,7 @@ void test_vluxseg4ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_tumu(
@@ -1466,7 +1466,7 @@ void test_vluxseg4ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_tumu(
@@ -1483,7 +1483,7 @@ void test_vluxseg4ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_tumu(
@@ -1500,7 +1500,7 @@ void test_vluxseg4ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_tumu(
@@ -1517,7 +1517,7 @@ void test_vluxseg4ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_tumu(
@@ -1534,7 +1534,7 @@ void test_vluxseg4ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vluxseg4ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_tumu(
@@ -1568,7 +1568,7 @@ void test_vluxseg4ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_tumu(
@@ -1585,7 +1585,7 @@ void test_vluxseg4ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_tumu(
@@ -1602,7 +1602,7 @@ void test_vluxseg4ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_tumu(
@@ -1619,7 +1619,7 @@ void test_vluxseg4ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_tumu(
@@ -1636,7 +1636,7 @@ void test_vluxseg4ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_tumu(
@@ -1653,7 +1653,7 @@ void test_vluxseg4ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_tumu(
@@ -1670,7 +1670,7 @@ void test_vluxseg4ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_tumu(
@@ -1687,7 +1687,7 @@ void test_vluxseg4ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_tumu(
@@ -1704,7 +1704,7 @@ void test_vluxseg4ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_tumu(
@@ -1721,7 +1721,7 @@ void test_vluxseg4ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_tumu(
@@ -1738,7 +1738,7 @@ void test_vluxseg4ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_tumu(
@@ -1755,7 +1755,7 @@ void test_vluxseg4ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_tumu(
@@ -1772,7 +1772,7 @@ void test_vluxseg4ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_tumu(
@@ -1789,7 +1789,7 @@ void test_vluxseg4ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_mu(
@@ -1806,7 +1806,7 @@ void test_vluxseg4ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_mu(
@@ -1823,7 +1823,7 @@ void test_vluxseg4ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_mu(
@@ -1840,7 +1840,7 @@ void test_vluxseg4ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_mu(
@@ -1857,7 +1857,7 @@ void test_vluxseg4ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_mu(
@@ -1874,7 +1874,7 @@ void test_vluxseg4ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_mu(
@@ -1891,7 +1891,7 @@ void test_vluxseg4ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_mu(
@@ -1908,7 +1908,7 @@ void test_vluxseg4ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_mu(
@@ -1925,7 +1925,7 @@ void test_vluxseg4ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_mu(
@@ -1942,7 +1942,7 @@ void test_vluxseg4ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vluxseg4ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_mu(
@@ -1976,7 +1976,7 @@ void test_vluxseg4ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_mu(
@@ -1993,7 +1993,7 @@ void test_vluxseg4ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_mu(
@@ -2010,7 +2010,7 @@ void test_vluxseg4ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_mu(
@@ -2027,7 +2027,7 @@ void test_vluxseg4ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_mu(
@@ -2044,7 +2044,7 @@ void test_vluxseg4ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_mu(
@@ -2061,7 +2061,7 @@ void test_vluxseg4ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_mu(
@@ -2078,7 +2078,7 @@ void test_vluxseg4ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_mu(
@@ -2095,7 +2095,7 @@ void test_vluxseg4ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_mu(
@@ -2112,7 +2112,7 @@ void test_vluxseg4ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_mu(
@@ -2129,7 +2129,7 @@ void test_vluxseg4ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_mu(
@@ -2146,7 +2146,7 @@ void test_vluxseg4ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_mu(
@@ -2163,7 +2163,7 @@ void test_vluxseg4ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_mu(
@@ -2180,7 +2180,7 @@ void test_vluxseg4ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_mu(
@@ -2197,7 +2197,7 @@ void test_vluxseg4ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vluxseg4ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_mu(
@@ -2231,7 +2231,7 @@ void test_vluxseg4ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_mu(
@@ -2248,7 +2248,7 @@ void test_vluxseg4ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_mu(
@@ -2265,7 +2265,7 @@ void test_vluxseg4ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_mu(
@@ -2282,7 +2282,7 @@ void test_vluxseg4ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_mu(
@@ -2299,7 +2299,7 @@ void test_vluxseg4ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_mu(
@@ -2316,7 +2316,7 @@ void test_vluxseg4ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_mu(
@@ -2333,7 +2333,7 @@ void test_vluxseg4ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_mu(
@@ -2350,7 +2350,7 @@ void test_vluxseg4ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_mu(
@@ -2367,7 +2367,7 @@ void test_vluxseg4ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_mu(
@@ -2384,6 +2384,6 @@ void test_vluxseg4ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
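// ---------------------------------------------------------------------------
// Minimal usage sketch (illustrative only, not part of the patch): a caller
// of the renamed overloaded intrinsic, mirroring the u64m2 _mu signature
// exercised by the tests above. The function name `demo_vluxseg4ei64_mu` is
// hypothetical; compiling it assumes a Clang that provides <riscv_vector.h>
// with the V extension enabled.
#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

void demo_vluxseg4ei64_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2,
                          vuint64m2_t *v3, vbool32_t mask, vuint64m2_t off0,
                          vuint64m2_t off1, vuint64m2_t off2, vuint64m2_t off3,
                          const uint64_t *base, vuint64m2_t bindex, size_t vl) {
  // Only the prefix changes: the overloaded name now starts with __riscv_,
  // while the _mu suffix still selects the mask-undisturbed policy variant.
  __riscv_vluxseg4ei64_mu(v0, v1, v2, v3, mask, off0, off1, off2, off3,
                          base, bindex, vl);
}
// ---------------------------------------------------------------------------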
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c
index 0f46889c8aed..3f50cbf80c4d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_tu(
@@ -38,7 +38,7 @@ void test_vluxseg4ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_tu(
@@ -55,7 +55,7 @@ void test_vluxseg4ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_tu(
@@ -72,7 +72,7 @@ void test_vluxseg4ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_tu(
@@ -89,7 +89,7 @@ void test_vluxseg4ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_tu(
@@ -106,7 +106,7 @@ void test_vluxseg4ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_tu(
@@ -123,7 +123,7 @@ void test_vluxseg4ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_tu(
@@ -140,7 +140,7 @@ void test_vluxseg4ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_tu(
@@ -157,7 +157,7 @@ void test_vluxseg4ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_tu(
@@ -174,7 +174,7 @@ void test_vluxseg4ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_tu(
@@ -191,7 +191,7 @@ void test_vluxseg4ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_tu(
@@ -208,7 +208,7 @@ void test_vluxseg4ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_tu(
@@ -225,7 +225,7 @@ void test_vluxseg4ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_tu(
@@ -242,7 +242,7 @@ void test_vluxseg4ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_tu(
@@ -259,7 +259,7 @@ void test_vluxseg4ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_tu(
@@ -276,7 +276,7 @@ void test_vluxseg4ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_tu(
@@ -293,7 +293,7 @@ void test_vluxseg4ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_tu(
@@ -310,7 +310,7 @@ void test_vluxseg4ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_tu(
@@ -327,7 +327,7 @@ void test_vluxseg4ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_tu(
@@ -344,7 +344,7 @@ void test_vluxseg4ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_tu(
@@ -361,7 +361,7 @@ void test_vluxseg4ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_tu(
@@ -378,7 +378,7 @@ void test_vluxseg4ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_tu(
@@ -395,7 +395,7 @@ void test_vluxseg4ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_tu(
@@ -412,7 +412,7 @@ void test_vluxseg4ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_tu(
@@ -429,7 +429,7 @@ void test_vluxseg4ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_tu(
@@ -446,7 +446,7 @@ void test_vluxseg4ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_tu(
@@ -463,7 +463,7 @@ void test_vluxseg4ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_tu(
@@ -480,7 +480,7 @@ void test_vluxseg4ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_tu(
@@ -497,7 +497,7 @@ void test_vluxseg4ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_tu(
@@ -514,7 +514,7 @@ void test_vluxseg4ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_tu(
@@ -531,7 +531,7 @@ void test_vluxseg4ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_tu(
@@ -548,7 +548,7 @@ void test_vluxseg4ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_tu(
@@ -565,7 +565,7 @@ void test_vluxseg4ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_tu(
@@ -582,7 +582,7 @@ void test_vluxseg4ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_tu(
@@ -599,7 +599,7 @@ void test_vluxseg4ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_tu(
@@ -616,7 +616,7 @@ void test_vluxseg4ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_tu(
@@ -633,7 +633,7 @@ void test_vluxseg4ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_tum(
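// ---------------------------------------------------------------------------
// Companion sketch (illustrative only, not part of the patch): the unmasked
// tail-undisturbed (_tu) overload takes no mask operand, unlike the masked
// variants (_tum, _tumu, _mu) elsewhere in this patch. The name
// `demo_vluxseg4ei8_tu` is hypothetical; the signature mirrors the u64m2
// test above.
#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

void demo_vluxseg4ei8_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2,
                         vuint64m2_t *v3, vuint64m2_t off0, vuint64m2_t off1,
                         vuint64m2_t off2, vuint64m2_t off3,
                         const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
  // Same renaming scheme: only the __riscv_ prefix is new; the overload is
  // still resolved from the operand types (here, a u64m2 segment load
  // indexed by 8-bit offsets).
  __riscv_vluxseg4ei8_tu(v0, v1, v2, v3, off0, off1, off2, off3,
                         base, bindex, vl);
}
// ---------------------------------------------------------------------------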
@@ -650,7 +650,7 @@ void test_vluxseg4ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_tum(
@@ -667,7 +667,7 @@ void test_vluxseg4ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_tum(
@@ -684,7 +684,7 @@ void test_vluxseg4ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_tum(
@@ -701,7 +701,7 @@ void test_vluxseg4ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_tum(
@@ -718,7 +718,7 @@ void test_vluxseg4ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_tum(
@@ -735,7 +735,7 @@ void test_vluxseg4ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_tum(
@@ -752,7 +752,7 @@ void test_vluxseg4ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_tum(
@@ -769,7 +769,7 @@ void test_vluxseg4ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_tum(
@@ -786,7 +786,7 @@ void test_vluxseg4ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_tum(
@@ -803,7 +803,7 @@ void test_vluxseg4ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_tum(
@@ -820,7 +820,7 @@ void test_vluxseg4ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_tum(
@@ -837,7 +837,7 @@ void test_vluxseg4ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_tum(
@@ -854,7 +854,7 @@ void test_vluxseg4ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_tum(
@@ -871,7 +871,7 @@ void test_vluxseg4ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_tum(
@@ -888,7 +888,7 @@ void test_vluxseg4ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_tum(
@@ -905,7 +905,7 @@ void test_vluxseg4ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_tum(
@@ -922,7 +922,7 @@ void test_vluxseg4ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_tum(
@@ -939,7 +939,7 @@ void test_vluxseg4ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_tum(
@@ -956,7 +956,7 @@ void test_vluxseg4ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_tum(
@@ -973,7 +973,7 @@ void test_vluxseg4ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_tum(
@@ -990,7 +990,7 @@ void test_vluxseg4ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_tum(
@@ -1007,7 +1007,7 @@ void test_vluxseg4ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_tum(
@@ -1024,7 +1024,7 @@ void test_vluxseg4ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_tum(
@@ -1041,7 +1041,7 @@ void test_vluxseg4ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_tum(
@@ -1058,7 +1058,7 @@ void test_vluxseg4ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_tum(
@@ -1075,7 +1075,7 @@ void test_vluxseg4ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_tum(
@@ -1092,7 +1092,7 @@ void test_vluxseg4ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_tum(
@@ -1109,7 +1109,7 @@ void test_vluxseg4ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_tum(
@@ -1126,7 +1126,7 @@ void test_vluxseg4ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_tum(
@@ -1143,7 +1143,7 @@ void test_vluxseg4ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_tum(
@@ -1160,7 +1160,7 @@ void test_vluxseg4ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_tum(
@@ -1177,7 +1177,7 @@ void test_vluxseg4ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_tum(
@@ -1194,7 +1194,7 @@ void test_vluxseg4ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_tum(
@@ -1211,7 +1211,7 @@ void test_vluxseg4ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_tum(
@@ -1228,7 +1228,7 @@ void test_vluxseg4ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_tum(
@@ -1245,7 +1245,7 @@ void test_vluxseg4ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_tum(
@@ -1262,7 +1262,7 @@ void test_vluxseg4ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
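// --- Illustrative sketch (not part of the autogenerated diff above) ---
// A minimal caller showing the rename these hunks apply: the overloaded
// tail-undisturbed (_tum) segment-load intrinsic keeps its operand-driven
// overload resolution and simply gains the __riscv_ prefix. The wrapper
// name `load4_f32m1_tum` is hypothetical; the call mirrors the
// test_vluxseg4ei8_v_f32m1_tum case in this series.
#include <riscv_vector.h>

static inline void load4_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1,
                                   vfloat32m1_t *v2, vfloat32m1_t *v3,
                                   vbool32_t mask,
                                   vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1,
                                   vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3,
                                   const float *base, vuint8mf4_t bindex, size_t vl) {
  // Previously spelled vluxseg4ei8_tum(...); after this patch the overload is
  // selected exactly as before (f32m1 segment data, 8-bit indices, masked-off
  // operands for the tail-undisturbed policy), only the prefix changes.
  __riscv_vluxseg4ei8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1,
                          maskedoff2, maskedoff3, base, bindex, vl);
}
// --- end sketch ---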
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_tumu(
@@ -1279,7 +1279,7 @@ void test_vluxseg4ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_tumu(
@@ -1296,7 +1296,7 @@ void test_vluxseg4ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_tumu(
@@ -1313,7 +1313,7 @@ void test_vluxseg4ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_tumu(
@@ -1330,7 +1330,7 @@ void test_vluxseg4ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_tumu(
@@ -1347,7 +1347,7 @@ void test_vluxseg4ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_tumu(
@@ -1364,7 +1364,7 @@ void test_vluxseg4ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_tumu(
@@ -1381,7 +1381,7 @@ void test_vluxseg4ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_tumu(
@@ -1398,7 +1398,7 @@ void test_vluxseg4ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_tumu(
@@ -1415,7 +1415,7 @@ void test_vluxseg4ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_tumu(
@@ -1432,7 +1432,7 @@ void test_vluxseg4ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_tumu(
@@ -1449,7 +1449,7 @@ void test_vluxseg4ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_tumu(
@@ -1466,7 +1466,7 @@ void test_vluxseg4ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_tumu(
@@ -1483,7 +1483,7 @@ void test_vluxseg4ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_tumu(
@@ -1500,7 +1500,7 @@ void test_vluxseg4ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_tumu(
@@ -1517,7 +1517,7 @@ void test_vluxseg4ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_tumu(
@@ -1534,7 +1534,7 @@ void test_vluxseg4ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_tumu(
@@ -1551,7 +1551,7 @@ void test_vluxseg4ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_tumu(
@@ -1568,7 +1568,7 @@ void test_vluxseg4ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_tumu(
@@ -1585,7 +1585,7 @@ void test_vluxseg4ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_tumu(
@@ -1602,7 +1602,7 @@ void test_vluxseg4ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_tumu(
@@ -1619,7 +1619,7 @@ void test_vluxseg4ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_tumu(
@@ -1636,7 +1636,7 @@ void test_vluxseg4ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_tumu(
@@ -1653,7 +1653,7 @@ void test_vluxseg4ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_tumu(
@@ -1670,7 +1670,7 @@ void test_vluxseg4ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_tumu(
@@ -1687,7 +1687,7 @@ void test_vluxseg4ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_tumu(
@@ -1704,7 +1704,7 @@ void test_vluxseg4ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_tumu(
@@ -1721,7 +1721,7 @@ void test_vluxseg4ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_tumu(
@@ -1738,7 +1738,7 @@ void test_vluxseg4ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_tumu(
@@ -1755,7 +1755,7 @@ void test_vluxseg4ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_tumu(
@@ -1772,7 +1772,7 @@ void test_vluxseg4ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_tumu(
@@ -1789,7 +1789,7 @@ void test_vluxseg4ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_tumu(
@@ -1806,7 +1806,7 @@ void test_vluxseg4ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_tumu(
@@ -1823,7 +1823,7 @@ void test_vluxseg4ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_tumu(
@@ -1840,7 +1840,7 @@ void test_vluxseg4ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_tumu(
@@ -1857,7 +1857,7 @@ void test_vluxseg4ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_tumu(
@@ -1874,7 +1874,7 @@ void test_vluxseg4ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_tumu(
@@ -1891,7 +1891,7 @@ void test_vluxseg4ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_mu(
@@ -1908,7 +1908,7 @@ void test_vluxseg4ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_mu(
@@ -1925,7 +1925,7 @@ void test_vluxseg4ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_mu(
@@ -1942,7 +1942,7 @@ void test_vluxseg4ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_mu(
@@ -1959,7 +1959,7 @@ void test_vluxseg4ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_mu(
@@ -1976,7 +1976,7 @@ void test_vluxseg4ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_mu(
@@ -1993,7 +1993,7 @@ void test_vluxseg4ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_mu(
@@ -2010,7 +2010,7 @@ void test_vluxseg4ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_mu(
@@ -2027,7 +2027,7 @@ void test_vluxseg4ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_mu(
@@ -2044,7 +2044,7 @@ void test_vluxseg4ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_mu(
@@ -2061,7 +2061,7 @@ void test_vluxseg4ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_mu(
@@ -2078,7 +2078,7 @@ void test_vluxseg4ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_mu(
@@ -2095,7 +2095,7 @@ void test_vluxseg4ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_mu(
@@ -2112,7 +2112,7 @@ void test_vluxseg4ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_mu(
@@ -2129,7 +2129,7 @@ void test_vluxseg4ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_mu(
@@ -2146,7 +2146,7 @@ void test_vluxseg4ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_mu(
@@ -2163,7 +2163,7 @@ void test_vluxseg4ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_mu(
@@ -2180,7 +2180,7 @@ void test_vluxseg4ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_mu(
@@ -2197,7 +2197,7 @@ void test_vluxseg4ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_mu(
@@ -2214,7 +2214,7 @@ void test_vluxseg4ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_mu(
@@ -2231,7 +2231,7 @@ void test_vluxseg4ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_mu(
@@ -2248,7 +2248,7 @@ void test_vluxseg4ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_mu(
@@ -2265,7 +2265,7 @@ void test_vluxseg4ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_mu(
@@ -2282,7 +2282,7 @@ void test_vluxseg4ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_mu(
@@ -2299,7 +2299,7 @@ void test_vluxseg4ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_mu(
@@ -2316,7 +2316,7 @@ void test_vluxseg4ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_mu(
@@ -2333,7 +2333,7 @@ void test_vluxseg4ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_mu(
@@ -2350,7 +2350,7 @@ void test_vluxseg4ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_mu(
@@ -2367,7 +2367,7 @@ void test_vluxseg4ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_mu(
@@ -2384,7 +2384,7 @@ void test_vluxseg4ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_mu(
@@ -2401,7 +2401,7 @@ void test_vluxseg4ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_mu(
@@ -2418,7 +2418,7 @@ void test_vluxseg4ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_mu(
@@ -2435,7 +2435,7 @@ void test_vluxseg4ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_mu(
@@ -2452,7 +2452,7 @@ void test_vluxseg4ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_mu(
@@ -2469,7 +2469,7 @@ void test_vluxseg4ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_mu(
@@ -2486,7 +2486,7 @@ void test_vluxseg4ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_mu(
@@ -2503,7 +2503,7 @@ void test_vluxseg4ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_mu(
@@ -2520,6 +2520,6 @@ void test_vluxseg4ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg4ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+ return __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
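Every hunk above makes the same one-token change: the overloaded policy intrinsic keeps its argument list and return type and only gains the `__riscv_` prefix. As a minimal sketch of the migration in user code (the wrapper below is hypothetical; it mirrors the test signatures in this patch and assumes only the overloaded declarations from <riscv_vector.h>, exactly as these tests do):

#include <riscv_vector.h>

// Hypothetical wrapper, not part of this patch: an indexed segment load of
// four i8m1 fields under the mask-undisturbed (_mu) policy, where inactive
// lanes keep the values of the maskedoff operands.
static void load4_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
                          vint8m1_t *v3, vbool8_t mask,
                          vint8m1_t maskedoff0, vint8m1_t maskedoff1,
                          vint8m1_t maskedoff2, vint8m1_t maskedoff3,
                          const int8_t *base, vuint8m1_t bindex, size_t vl) {
  // Previously spelled vluxseg4ei8_mu(...); with this patch the overloaded
  // call carries the prefix, with every operand unchanged:
  __riscv_vluxseg4ei8_mu(v0, v1, v2, v3, mask,
                         maskedoff0, maskedoff1, maskedoff2, maskedoff3,
                         base, bindex, vl);
}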
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c
index e3812b0cc608..f20ee4c4f229 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vluxseg5ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vluxseg5ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_tu(
@@ -80,7 +80,7 @@ void test_vluxseg5ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_tu(
@@ -99,7 +99,7 @@ void test_vluxseg5ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_tu(
@@ -118,7 +118,7 @@ void test_vluxseg5ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_tu(
@@ -137,7 +137,7 @@ void test_vluxseg5ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_tu(
@@ -156,7 +156,7 @@ void test_vluxseg5ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_tu(
@@ -175,7 +175,7 @@ void test_vluxseg5ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_tu(
@@ -194,7 +194,7 @@ void test_vluxseg5ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_tu(
@@ -213,7 +213,7 @@ void test_vluxseg5ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_tu(
@@ -232,7 +232,7 @@ void test_vluxseg5ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_tu(
@@ -251,7 +251,7 @@ void test_vluxseg5ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_tu(
@@ -270,7 +270,7 @@ void test_vluxseg5ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vluxseg5ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_tu(
@@ -308,7 +308,7 @@ void test_vluxseg5ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_tu(
@@ -327,7 +327,7 @@ void test_vluxseg5ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_tu(
@@ -346,7 +346,7 @@ void test_vluxseg5ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_tu(
@@ -365,7 +365,7 @@ void test_vluxseg5ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_tu(
@@ -384,7 +384,7 @@ void test_vluxseg5ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_tu(
@@ -403,7 +403,7 @@ void test_vluxseg5ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_tu(
@@ -422,7 +422,7 @@ void test_vluxseg5ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_tu(
@@ -441,7 +441,7 @@ void test_vluxseg5ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_tu(
@@ -460,7 +460,7 @@ void test_vluxseg5ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_tu(
@@ -479,7 +479,7 @@ void test_vluxseg5ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_tu(
@@ -498,7 +498,7 @@ void test_vluxseg5ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
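// Note the signature shift at this point in the file: the _tu
// (tail-undisturbed) tests above are unmasked, while the _tum, _tumu, and
// _mu tests below additionally pass a vbool*_t mask ahead of the maskedoff
// operands. The rename is uniform across all policy variants: only the
// __riscv_ prefix is added; the arguments are untouched.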
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_tum(
@@ -517,7 +517,7 @@ void test_vluxseg5ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_tum(
@@ -536,7 +536,7 @@ void test_vluxseg5ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_tum(
@@ -555,7 +555,7 @@ void test_vluxseg5ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_tum(
@@ -574,7 +574,7 @@ void test_vluxseg5ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_tum(
@@ -593,7 +593,7 @@ void test_vluxseg5ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_tum(
@@ -612,7 +612,7 @@ void test_vluxseg5ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_tum(
@@ -631,7 +631,7 @@ void test_vluxseg5ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_tum(
@@ -650,7 +650,7 @@ void test_vluxseg5ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_tum(
@@ -669,7 +669,7 @@ void test_vluxseg5ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_tum(
@@ -688,7 +688,7 @@ void test_vluxseg5ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_tum(
@@ -707,7 +707,7 @@ void test_vluxseg5ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_tum(
@@ -726,7 +726,7 @@ void test_vluxseg5ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_tum(
@@ -745,7 +745,7 @@ void test_vluxseg5ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_tum(
@@ -764,7 +764,7 @@ void test_vluxseg5ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_tum(
@@ -783,7 +783,7 @@ void test_vluxseg5ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_tum(
@@ -802,7 +802,7 @@ void test_vluxseg5ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_tum(
@@ -821,7 +821,7 @@ void test_vluxseg5ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_tum(
@@ -840,7 +840,7 @@ void test_vluxseg5ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_tum(
@@ -859,7 +859,7 @@ void test_vluxseg5ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_tum(
@@ -878,7 +878,7 @@ void test_vluxseg5ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_tum(
@@ -897,7 +897,7 @@ void test_vluxseg5ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_tum(
@@ -916,7 +916,7 @@ void test_vluxseg5ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_tum(
@@ -935,7 +935,7 @@ void test_vluxseg5ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_tum(
@@ -954,7 +954,7 @@ void test_vluxseg5ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_tum(
@@ -973,7 +973,7 @@ void test_vluxseg5ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_tum(
@@ -992,7 +992,7 @@ void test_vluxseg5ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_tumu(
@@ -1011,7 +1011,7 @@ void test_vluxseg5ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_tumu(
@@ -1030,7 +1030,7 @@ void test_vluxseg5ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_tumu(
@@ -1049,7 +1049,7 @@ void test_vluxseg5ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_tumu(
@@ -1068,7 +1068,7 @@ void test_vluxseg5ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_tumu(
@@ -1087,7 +1087,7 @@ void test_vluxseg5ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_tumu(
@@ -1106,7 +1106,7 @@ void test_vluxseg5ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_tumu(
@@ -1125,7 +1125,7 @@ void test_vluxseg5ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_tumu(
@@ -1144,7 +1144,7 @@ void test_vluxseg5ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_tumu(
@@ -1163,7 +1163,7 @@ void test_vluxseg5ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_tumu(
@@ -1182,7 +1182,7 @@ void test_vluxseg5ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_tumu(
@@ -1201,7 +1201,7 @@ void test_vluxseg5ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_tumu(
@@ -1220,7 +1220,7 @@ void test_vluxseg5ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vluxseg5ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_tumu(
@@ -1258,7 +1258,7 @@ void test_vluxseg5ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_tumu(
@@ -1277,7 +1277,7 @@ void test_vluxseg5ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vluxseg5ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_tumu(
@@ -1315,7 +1315,7 @@ void test_vluxseg5ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_tumu(
@@ -1334,7 +1334,7 @@ void test_vluxseg5ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_tumu(
@@ -1353,7 +1353,7 @@ void test_vluxseg5ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_tumu(
@@ -1372,7 +1372,7 @@ void test_vluxseg5ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_tumu(
@@ -1391,7 +1391,7 @@ void test_vluxseg5ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_tumu(
@@ -1410,7 +1410,7 @@ void test_vluxseg5ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg5ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_tumu(
@@ -1448,7 +1448,7 @@ void test_vluxseg5ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_tumu(
@@ -1467,7 +1467,7 @@ void test_vluxseg5ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_tumu(
@@ -1486,7 +1486,7 @@ void test_vluxseg5ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_mu(
@@ -1505,7 +1505,7 @@ void test_vluxseg5ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_mu(
@@ -1524,7 +1524,7 @@ void test_vluxseg5ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_mu(
@@ -1543,7 +1543,7 @@ void test_vluxseg5ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_mu(
@@ -1562,7 +1562,7 @@ void test_vluxseg5ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_mu(
@@ -1581,7 +1581,7 @@ void test_vluxseg5ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_mu(
@@ -1600,7 +1600,7 @@ void test_vluxseg5ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_mu(
@@ -1619,7 +1619,7 @@ void test_vluxseg5ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_mu(
@@ -1638,7 +1638,7 @@ void test_vluxseg5ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_mu(
@@ -1657,7 +1657,7 @@ void test_vluxseg5ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_mu(
@@ -1676,7 +1676,7 @@ void test_vluxseg5ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_mu(
@@ -1695,7 +1695,7 @@ void test_vluxseg5ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_mu(
@@ -1714,7 +1714,7 @@ void test_vluxseg5ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_mu(
@@ -1733,7 +1733,7 @@ void test_vluxseg5ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_mu(
@@ -1752,7 +1752,7 @@ void test_vluxseg5ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_mu(
@@ -1771,7 +1771,7 @@ void test_vluxseg5ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_mu(
@@ -1790,7 +1790,7 @@ void test_vluxseg5ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_mu(
@@ -1809,7 +1809,7 @@ void test_vluxseg5ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_mu(
@@ -1828,7 +1828,7 @@ void test_vluxseg5ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_mu(
@@ -1847,7 +1847,7 @@ void test_vluxseg5ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_mu(
@@ -1866,7 +1866,7 @@ void test_vluxseg5ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_mu(
@@ -1885,7 +1885,7 @@ void test_vluxseg5ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_mu(
@@ -1904,7 +1904,7 @@ void test_vluxseg5ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_mu(
@@ -1923,7 +1923,7 @@ void test_vluxseg5ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_mu(
@@ -1942,7 +1942,7 @@ void test_vluxseg5ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_mu(
@@ -1961,7 +1961,7 @@ void test_vluxseg5ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_mu(
@@ -1980,6 +1980,6 @@ void test_vluxseg5ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c
index b1667e5a3474..5858aa7d277d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vluxseg5ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vluxseg5ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_tu(
@@ -80,7 +80,7 @@ void test_vluxseg5ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_tu(
@@ -99,7 +99,7 @@ void test_vluxseg5ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_tu(
@@ -118,7 +118,7 @@ void test_vluxseg5ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_tu(
@@ -137,7 +137,7 @@ void test_vluxseg5ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_tu(
@@ -156,7 +156,7 @@ void test_vluxseg5ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_tu(
@@ -175,7 +175,7 @@ void test_vluxseg5ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_tu(
@@ -194,7 +194,7 @@ void test_vluxseg5ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_tu(
@@ -213,7 +213,7 @@ void test_vluxseg5ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_tu(
@@ -232,7 +232,7 @@ void test_vluxseg5ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_tu(
@@ -251,7 +251,7 @@ void test_vluxseg5ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_tu(
@@ -270,7 +270,7 @@ void test_vluxseg5ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vluxseg5ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_tu(
@@ -308,7 +308,7 @@ void test_vluxseg5ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_tu(
@@ -327,7 +327,7 @@ void test_vluxseg5ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_tu(
@@ -346,7 +346,7 @@ void test_vluxseg5ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_tu(
@@ -365,7 +365,7 @@ void test_vluxseg5ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_tu(
@@ -384,7 +384,7 @@ void test_vluxseg5ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_tu(
@@ -403,7 +403,7 @@ void test_vluxseg5ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_tu(
@@ -422,7 +422,7 @@ void test_vluxseg5ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_tu(
@@ -441,7 +441,7 @@ void test_vluxseg5ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_tu(
@@ -460,7 +460,7 @@ void test_vluxseg5ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_tu(
@@ -479,7 +479,7 @@ void test_vluxseg5ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_tu(
@@ -498,7 +498,7 @@ void test_vluxseg5ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_tum(
@@ -517,7 +517,7 @@ void test_vluxseg5ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_tum(
@@ -536,7 +536,7 @@ void test_vluxseg5ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_tum(
@@ -555,7 +555,7 @@ void test_vluxseg5ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_tum(
@@ -574,7 +574,7 @@ void test_vluxseg5ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_tum(
@@ -593,7 +593,7 @@ void test_vluxseg5ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_tum(
@@ -612,7 +612,7 @@ void test_vluxseg5ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_tum(
@@ -631,7 +631,7 @@ void test_vluxseg5ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_tum(
@@ -650,7 +650,7 @@ void test_vluxseg5ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_tum(
@@ -669,7 +669,7 @@ void test_vluxseg5ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_tum(
@@ -688,7 +688,7 @@ void test_vluxseg5ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_tum(
@@ -707,7 +707,7 @@ void test_vluxseg5ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_tum(
@@ -726,7 +726,7 @@ void test_vluxseg5ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_tum(
@@ -745,7 +745,7 @@ void test_vluxseg5ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_tum(
@@ -764,7 +764,7 @@ void test_vluxseg5ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_tum(
@@ -783,7 +783,7 @@ void test_vluxseg5ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_tum(
@@ -802,7 +802,7 @@ void test_vluxseg5ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_tum(
@@ -821,7 +821,7 @@ void test_vluxseg5ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_tum(
@@ -840,7 +840,7 @@ void test_vluxseg5ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_tum(
@@ -859,7 +859,7 @@ void test_vluxseg5ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_tum(
@@ -878,7 +878,7 @@ void test_vluxseg5ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_tum(
@@ -897,7 +897,7 @@ void test_vluxseg5ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_tum(
@@ -916,7 +916,7 @@ void test_vluxseg5ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_tum(
@@ -935,7 +935,7 @@ void test_vluxseg5ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_tum(
@@ -954,7 +954,7 @@ void test_vluxseg5ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_tum(
@@ -973,7 +973,7 @@ void test_vluxseg5ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_tum(
@@ -992,7 +992,7 @@ void test_vluxseg5ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_tumu(
@@ -1011,7 +1011,7 @@ void test_vluxseg5ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_tumu(
@@ -1030,7 +1030,7 @@ void test_vluxseg5ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_tumu(
@@ -1049,7 +1049,7 @@ void test_vluxseg5ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_tumu(
@@ -1068,7 +1068,7 @@ void test_vluxseg5ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_tumu(
@@ -1087,7 +1087,7 @@ void test_vluxseg5ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_tumu(
@@ -1106,7 +1106,7 @@ void test_vluxseg5ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_tumu(
@@ -1125,7 +1125,7 @@ void test_vluxseg5ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_tumu(
@@ -1144,7 +1144,7 @@ void test_vluxseg5ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_tumu(
@@ -1163,7 +1163,7 @@ void test_vluxseg5ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_tumu(
@@ -1182,7 +1182,7 @@ void test_vluxseg5ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_tumu(
@@ -1201,7 +1201,7 @@ void test_vluxseg5ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_tumu(
@@ -1220,7 +1220,7 @@ void test_vluxseg5ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vluxseg5ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_tumu(
@@ -1258,7 +1258,7 @@ void test_vluxseg5ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_tumu(
@@ -1277,7 +1277,7 @@ void test_vluxseg5ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vluxseg5ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_tumu(
@@ -1315,7 +1315,7 @@ void test_vluxseg5ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_tumu(
@@ -1334,7 +1334,7 @@ void test_vluxseg5ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_tumu(
@@ -1353,7 +1353,7 @@ void test_vluxseg5ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_tumu(
@@ -1372,7 +1372,7 @@ void test_vluxseg5ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_tumu(
@@ -1391,7 +1391,7 @@ void test_vluxseg5ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_tumu(
@@ -1410,7 +1410,7 @@ void test_vluxseg5ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg5ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_tumu(
@@ -1448,7 +1448,7 @@ void test_vluxseg5ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_tumu(
@@ -1467,7 +1467,7 @@ void test_vluxseg5ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_tumu(
@@ -1486,7 +1486,7 @@ void test_vluxseg5ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_mu(
@@ -1505,7 +1505,7 @@ void test_vluxseg5ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_mu(
@@ -1524,7 +1524,7 @@ void test_vluxseg5ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_mu(
@@ -1543,7 +1543,7 @@ void test_vluxseg5ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_mu(
@@ -1562,7 +1562,7 @@ void test_vluxseg5ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_mu(
@@ -1581,7 +1581,7 @@ void test_vluxseg5ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_mu(
@@ -1600,7 +1600,7 @@ void test_vluxseg5ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_mu(
@@ -1619,7 +1619,7 @@ void test_vluxseg5ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_mu(
@@ -1638,7 +1638,7 @@ void test_vluxseg5ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_mu(
@@ -1657,7 +1657,7 @@ void test_vluxseg5ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_mu(
@@ -1676,7 +1676,7 @@ void test_vluxseg5ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_mu(
@@ -1695,7 +1695,7 @@ void test_vluxseg5ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_mu(
@@ -1714,7 +1714,7 @@ void test_vluxseg5ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_mu(
@@ -1733,7 +1733,7 @@ void test_vluxseg5ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_mu(
@@ -1752,7 +1752,7 @@ void test_vluxseg5ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_mu(
@@ -1771,7 +1771,7 @@ void test_vluxseg5ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_mu(
@@ -1790,7 +1790,7 @@ void test_vluxseg5ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_mu(
@@ -1809,7 +1809,7 @@ void test_vluxseg5ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_mu(
@@ -1828,7 +1828,7 @@ void test_vluxseg5ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_mu(
@@ -1847,7 +1847,7 @@ void test_vluxseg5ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_mu(
@@ -1866,7 +1866,7 @@ void test_vluxseg5ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_mu(
@@ -1885,7 +1885,7 @@ void test_vluxseg5ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_mu(
@@ -1904,7 +1904,7 @@ void test_vluxseg5ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_mu(
@@ -1923,7 +1923,7 @@ void test_vluxseg5ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_mu(
@@ -1942,7 +1942,7 @@ void test_vluxseg5ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_mu(
@@ -1961,7 +1961,7 @@ void test_vluxseg5ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_mu(
@@ -1980,6 +1980,6 @@ void test_vluxseg5ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c
index db595daea511..d923eccc42f4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vluxseg5ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vluxseg5ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_tu(
@@ -80,7 +80,7 @@ void test_vluxseg5ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_tu(
@@ -99,7 +99,7 @@ void test_vluxseg5ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_tu(
@@ -118,7 +118,7 @@ void test_vluxseg5ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_tu(
@@ -137,7 +137,7 @@ void test_vluxseg5ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_tu(
@@ -156,7 +156,7 @@ void test_vluxseg5ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_tu(
@@ -175,7 +175,7 @@ void test_vluxseg5ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_tu(
@@ -194,7 +194,7 @@ void test_vluxseg5ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_tu(
@@ -213,7 +213,7 @@ void test_vluxseg5ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_tu(
@@ -232,7 +232,7 @@ void test_vluxseg5ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_tu(
@@ -251,7 +251,7 @@ void test_vluxseg5ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_tu(
@@ -270,7 +270,7 @@ void test_vluxseg5ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vluxseg5ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_tu(
@@ -308,7 +308,7 @@ void test_vluxseg5ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_tu(
@@ -327,7 +327,7 @@ void test_vluxseg5ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_tu(
@@ -346,7 +346,7 @@ void test_vluxseg5ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_tu(
@@ -365,7 +365,7 @@ void test_vluxseg5ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_tu(
@@ -384,7 +384,7 @@ void test_vluxseg5ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_tu(
@@ -403,7 +403,7 @@ void test_vluxseg5ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_tu(
@@ -422,7 +422,7 @@ void test_vluxseg5ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_tu(
@@ -441,7 +441,7 @@ void test_vluxseg5ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_tu(
@@ -460,7 +460,7 @@ void test_vluxseg5ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_tu(
@@ -479,7 +479,7 @@ void test_vluxseg5ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_tu(
@@ -498,7 +498,7 @@ void test_vluxseg5ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_tum(
@@ -517,7 +517,7 @@ void test_vluxseg5ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_tum(
@@ -536,7 +536,7 @@ void test_vluxseg5ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_tum(
@@ -555,7 +555,7 @@ void test_vluxseg5ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_tum(
@@ -574,7 +574,7 @@ void test_vluxseg5ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_tum(
@@ -593,7 +593,7 @@ void test_vluxseg5ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_tum(
@@ -612,7 +612,7 @@ void test_vluxseg5ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_tum(
@@ -631,7 +631,7 @@ void test_vluxseg5ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_tum(
@@ -650,7 +650,7 @@ void test_vluxseg5ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_tum(
@@ -669,7 +669,7 @@ void test_vluxseg5ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_tum(
@@ -688,7 +688,7 @@ void test_vluxseg5ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_tum(
@@ -707,7 +707,7 @@ void test_vluxseg5ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_tum(
@@ -726,7 +726,7 @@ void test_vluxseg5ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_tum(
@@ -745,7 +745,7 @@ void test_vluxseg5ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_tum(
@@ -764,7 +764,7 @@ void test_vluxseg5ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_tum(
@@ -783,7 +783,7 @@ void test_vluxseg5ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_tum(
@@ -802,7 +802,7 @@ void test_vluxseg5ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_tum(
@@ -821,7 +821,7 @@ void test_vluxseg5ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_tum(
@@ -840,7 +840,7 @@ void test_vluxseg5ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_tum(
@@ -859,7 +859,7 @@ void test_vluxseg5ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_tum(
@@ -878,7 +878,7 @@ void test_vluxseg5ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_tum(
@@ -897,7 +897,7 @@ void test_vluxseg5ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_tum(
@@ -916,7 +916,7 @@ void test_vluxseg5ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_tum(
@@ -935,7 +935,7 @@ void test_vluxseg5ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_tum(
@@ -954,7 +954,7 @@ void test_vluxseg5ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_tum(
@@ -973,7 +973,7 @@ void test_vluxseg5ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_tum(
@@ -992,7 +992,7 @@ void test_vluxseg5ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_tumu(
@@ -1011,7 +1011,7 @@ void test_vluxseg5ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_tumu(
@@ -1030,7 +1030,7 @@ void test_vluxseg5ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_tumu(
@@ -1049,7 +1049,7 @@ void test_vluxseg5ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_tumu(
@@ -1068,7 +1068,7 @@ void test_vluxseg5ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_tumu(
@@ -1087,7 +1087,7 @@ void test_vluxseg5ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_tumu(
@@ -1106,7 +1106,7 @@ void test_vluxseg5ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_tumu(
@@ -1125,7 +1125,7 @@ void test_vluxseg5ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_tumu(
@@ -1144,7 +1144,7 @@ void test_vluxseg5ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_tumu(
@@ -1163,7 +1163,7 @@ void test_vluxseg5ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_tumu(
@@ -1182,7 +1182,7 @@ void test_vluxseg5ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_tumu(
@@ -1201,7 +1201,7 @@ void test_vluxseg5ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_tumu(
@@ -1220,7 +1220,7 @@ void test_vluxseg5ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vluxseg5ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_tumu(
@@ -1258,7 +1258,7 @@ void test_vluxseg5ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_tumu(
@@ -1277,7 +1277,7 @@ void test_vluxseg5ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vluxseg5ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_tumu(
@@ -1315,7 +1315,7 @@ void test_vluxseg5ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_tumu(
@@ -1334,7 +1334,7 @@ void test_vluxseg5ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_tumu(
@@ -1353,7 +1353,7 @@ void test_vluxseg5ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_tumu(
@@ -1372,7 +1372,7 @@ void test_vluxseg5ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_tumu(
@@ -1391,7 +1391,7 @@ void test_vluxseg5ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_tumu(
@@ -1410,7 +1410,7 @@ void test_vluxseg5ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg5ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_tumu(
@@ -1448,7 +1448,7 @@ void test_vluxseg5ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_tumu(
@@ -1467,7 +1467,7 @@ void test_vluxseg5ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_tumu(
@@ -1486,7 +1486,7 @@ void test_vluxseg5ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_mu(
@@ -1505,7 +1505,7 @@ void test_vluxseg5ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_mu(
@@ -1524,7 +1524,7 @@ void test_vluxseg5ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_mu(
@@ -1543,7 +1543,7 @@ void test_vluxseg5ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_mu(
@@ -1562,7 +1562,7 @@ void test_vluxseg5ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_mu(
@@ -1581,7 +1581,7 @@ void test_vluxseg5ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_mu(
@@ -1600,7 +1600,7 @@ void test_vluxseg5ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_mu(
@@ -1619,7 +1619,7 @@ void test_vluxseg5ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_mu(
@@ -1638,7 +1638,7 @@ void test_vluxseg5ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_mu(
@@ -1657,7 +1657,7 @@ void test_vluxseg5ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_mu(
@@ -1676,7 +1676,7 @@ void test_vluxseg5ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_mu(
@@ -1695,7 +1695,7 @@ void test_vluxseg5ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_mu(
@@ -1714,7 +1714,7 @@ void test_vluxseg5ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_mu(
@@ -1733,7 +1733,7 @@ void test_vluxseg5ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_mu(
@@ -1752,7 +1752,7 @@ void test_vluxseg5ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_mu(
@@ -1771,7 +1771,7 @@ void test_vluxseg5ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_mu(
@@ -1790,7 +1790,7 @@ void test_vluxseg5ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_mu(
@@ -1809,7 +1809,7 @@ void test_vluxseg5ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_mu(
@@ -1828,7 +1828,7 @@ void test_vluxseg5ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_mu(
@@ -1847,7 +1847,7 @@ void test_vluxseg5ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_mu(
@@ -1866,7 +1866,7 @@ void test_vluxseg5ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_mu(
@@ -1885,7 +1885,7 @@ void test_vluxseg5ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_mu(
@@ -1904,7 +1904,7 @@ void test_vluxseg5ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_mu(
@@ -1923,7 +1923,7 @@ void test_vluxseg5ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_mu(
@@ -1942,7 +1942,7 @@ void test_vluxseg5ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_mu(
@@ -1961,7 +1961,7 @@ void test_vluxseg5ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_mu(
@@ -1980,6 +1980,6 @@ void test_vluxseg5ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c
index a3427df1b61d..2689cba94642 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c
@@ -23,7 +23,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_tu(
@@ -42,7 +42,7 @@ void test_vluxseg5ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_tu(
@@ -61,7 +61,7 @@ void test_vluxseg5ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_tu(
@@ -80,7 +80,7 @@ void test_vluxseg5ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_tu(
@@ -99,7 +99,7 @@ void test_vluxseg5ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_tu(
@@ -118,7 +118,7 @@ void test_vluxseg5ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_tu(
@@ -137,7 +137,7 @@ void test_vluxseg5ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_tu(
@@ -156,7 +156,7 @@ void test_vluxseg5ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_tu(
@@ -175,7 +175,7 @@ void test_vluxseg5ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_tu(
@@ -194,7 +194,7 @@ void test_vluxseg5ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_tu(
@@ -213,7 +213,7 @@ void test_vluxseg5ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_tu(
@@ -232,7 +232,7 @@ void test_vluxseg5ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_tu(
@@ -251,7 +251,7 @@ void test_vluxseg5ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_tu(
@@ -270,7 +270,7 @@ void test_vluxseg5ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_tu(
@@ -289,7 +289,7 @@ void test_vluxseg5ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_tu(
@@ -308,7 +308,7 @@ void test_vluxseg5ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_tu(
@@ -327,7 +327,7 @@ void test_vluxseg5ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_tu(
@@ -346,7 +346,7 @@ void test_vluxseg5ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_tu(
@@ -365,7 +365,7 @@ void test_vluxseg5ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_tu(
@@ -384,7 +384,7 @@ void test_vluxseg5ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_tu(
@@ -403,7 +403,7 @@ void test_vluxseg5ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_tu(
@@ -422,7 +422,7 @@ void test_vluxseg5ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_tu(
@@ -441,7 +441,7 @@ void test_vluxseg5ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_tu(
@@ -460,7 +460,7 @@ void test_vluxseg5ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_tu(
@@ -479,7 +479,7 @@ void test_vluxseg5ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_tu(
@@ -498,7 +498,7 @@ void test_vluxseg5ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_tum(
@@ -517,7 +517,7 @@ void test_vluxseg5ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_tum(
@@ -536,7 +536,7 @@ void test_vluxseg5ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_tum(
@@ -555,7 +555,7 @@ void test_vluxseg5ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_tum(
@@ -574,7 +574,7 @@ void test_vluxseg5ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_tum(
@@ -593,7 +593,7 @@ void test_vluxseg5ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_tum(
@@ -612,7 +612,7 @@ void test_vluxseg5ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_tum(
@@ -631,7 +631,7 @@ void test_vluxseg5ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_tum(
@@ -650,7 +650,7 @@ void test_vluxseg5ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_tum(
@@ -669,7 +669,7 @@ void test_vluxseg5ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_tum(
@@ -688,7 +688,7 @@ void test_vluxseg5ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_tum(
@@ -707,7 +707,7 @@ void test_vluxseg5ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_tum(
@@ -726,7 +726,7 @@ void test_vluxseg5ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_tum(
@@ -745,7 +745,7 @@ void test_vluxseg5ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_tum(
@@ -764,7 +764,7 @@ void test_vluxseg5ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_tum(
@@ -783,7 +783,7 @@ void test_vluxseg5ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_tum(
@@ -802,7 +802,7 @@ void test_vluxseg5ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_tum(
@@ -821,7 +821,7 @@ void test_vluxseg5ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_tum(
@@ -840,7 +840,7 @@ void test_vluxseg5ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_tum(
@@ -859,7 +859,7 @@ void test_vluxseg5ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_tum(
@@ -878,7 +878,7 @@ void test_vluxseg5ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_tum(
@@ -897,7 +897,7 @@ void test_vluxseg5ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_tum(
@@ -916,7 +916,7 @@ void test_vluxseg5ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_tum(
@@ -935,7 +935,7 @@ void test_vluxseg5ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_tum(
@@ -954,7 +954,7 @@ void test_vluxseg5ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_tum(
@@ -973,7 +973,7 @@ void test_vluxseg5ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_tum(
@@ -992,7 +992,7 @@ void test_vluxseg5ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_tumu(
@@ -1011,7 +1011,7 @@ void test_vluxseg5ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_tumu(
@@ -1030,7 +1030,7 @@ void test_vluxseg5ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_tumu(
@@ -1049,7 +1049,7 @@ void test_vluxseg5ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_tumu(
@@ -1068,7 +1068,7 @@ void test_vluxseg5ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_tumu(
@@ -1087,7 +1087,7 @@ void test_vluxseg5ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_tumu(
@@ -1106,7 +1106,7 @@ void test_vluxseg5ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_tumu(
@@ -1125,7 +1125,7 @@ void test_vluxseg5ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_tumu(
@@ -1144,7 +1144,7 @@ void test_vluxseg5ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_tumu(
@@ -1163,7 +1163,7 @@ void test_vluxseg5ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_tumu(
@@ -1182,7 +1182,7 @@ void test_vluxseg5ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_tumu(
@@ -1201,7 +1201,7 @@ void test_vluxseg5ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_tumu(
@@ -1220,7 +1220,7 @@ void test_vluxseg5ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_tumu(
@@ -1239,7 +1239,7 @@ void test_vluxseg5ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_tumu(
@@ -1258,7 +1258,7 @@ void test_vluxseg5ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_tumu(
@@ -1277,7 +1277,7 @@ void test_vluxseg5ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_tumu(
@@ -1296,7 +1296,7 @@ void test_vluxseg5ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_tumu(
@@ -1315,7 +1315,7 @@ void test_vluxseg5ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_tumu(
@@ -1334,7 +1334,7 @@ void test_vluxseg5ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_tumu(
@@ -1353,7 +1353,7 @@ void test_vluxseg5ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_tumu(
@@ -1372,7 +1372,7 @@ void test_vluxseg5ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_tumu(
@@ -1391,7 +1391,7 @@ void test_vluxseg5ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_tumu(
@@ -1410,7 +1410,7 @@ void test_vluxseg5ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg5ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_tumu(
@@ -1448,7 +1448,7 @@ void test_vluxseg5ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_tumu(
@@ -1467,7 +1467,7 @@ void test_vluxseg5ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_tumu(
@@ -1486,7 +1486,7 @@ void test_vluxseg5ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_mu(
@@ -1505,7 +1505,7 @@ void test_vluxseg5ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_mu(
@@ -1524,7 +1524,7 @@ void test_vluxseg5ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_mu(
@@ -1543,7 +1543,7 @@ void test_vluxseg5ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_mu(
@@ -1562,7 +1562,7 @@ void test_vluxseg5ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_mu(
@@ -1581,7 +1581,7 @@ void test_vluxseg5ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_mu(
@@ -1600,7 +1600,7 @@ void test_vluxseg5ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_mu(
@@ -1619,7 +1619,7 @@ void test_vluxseg5ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_mu(
@@ -1638,7 +1638,7 @@ void test_vluxseg5ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_mu(
@@ -1657,7 +1657,7 @@ void test_vluxseg5ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_mu(
@@ -1676,7 +1676,7 @@ void test_vluxseg5ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_mu(
@@ -1695,7 +1695,7 @@ void test_vluxseg5ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_mu(
@@ -1714,7 +1714,7 @@ void test_vluxseg5ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_mu(
@@ -1733,7 +1733,7 @@ void test_vluxseg5ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_mu(
@@ -1752,7 +1752,7 @@ void test_vluxseg5ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_mu(
@@ -1771,7 +1771,7 @@ void test_vluxseg5ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_mu(
@@ -1790,7 +1790,7 @@ void test_vluxseg5ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_mu(
@@ -1809,7 +1809,7 @@ void test_vluxseg5ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_mu(
@@ -1828,7 +1828,7 @@ void test_vluxseg5ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_mu(
@@ -1847,7 +1847,7 @@ void test_vluxseg5ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_mu(
@@ -1866,7 +1866,7 @@ void test_vluxseg5ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_mu(
@@ -1885,7 +1885,7 @@ void test_vluxseg5ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_mu(
@@ -1904,7 +1904,7 @@ void test_vluxseg5ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_mu(
@@ -1923,7 +1923,7 @@ void test_vluxseg5ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_mu(
@@ -1942,7 +1942,7 @@ void test_vluxseg5ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_mu(
@@ -1961,7 +1961,7 @@ void test_vluxseg5ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_mu(
@@ -1980,6 +1980,6 @@ void test_vluxseg5ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg5ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+ return __riscv_vluxseg5ei8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c
index 8d496bab54ce..a148ada3058a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vluxseg6ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vluxseg6ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_tu(
@@ -88,7 +88,7 @@ void test_vluxseg6ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_tu(
@@ -109,7 +109,7 @@ void test_vluxseg6ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_tu(
@@ -130,7 +130,7 @@ void test_vluxseg6ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_tu(
@@ -151,7 +151,7 @@ void test_vluxseg6ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_tu(
@@ -172,7 +172,7 @@ void test_vluxseg6ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_tu(
@@ -193,7 +193,7 @@ void test_vluxseg6ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_tu(
@@ -214,7 +214,7 @@ void test_vluxseg6ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_tu(
@@ -235,7 +235,7 @@ void test_vluxseg6ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_tu(
@@ -256,7 +256,7 @@ void test_vluxseg6ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vluxseg6ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_tu(
@@ -298,7 +298,7 @@ void test_vluxseg6ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_tu(
@@ -319,7 +319,7 @@ void test_vluxseg6ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_tu(
@@ -340,7 +340,7 @@ void test_vluxseg6ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_tu(
@@ -361,7 +361,7 @@ void test_vluxseg6ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_tu(
@@ -382,7 +382,7 @@ void test_vluxseg6ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_tu(
@@ -403,7 +403,7 @@ void test_vluxseg6ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_tu(
@@ -424,7 +424,7 @@ void test_vluxseg6ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_tu(
@@ -445,7 +445,7 @@ void test_vluxseg6ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_tu(
@@ -466,7 +466,7 @@ void test_vluxseg6ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_tu(
@@ -487,7 +487,7 @@ void test_vluxseg6ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_tu(
@@ -508,7 +508,7 @@ void test_vluxseg6ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_tu(
@@ -529,7 +529,7 @@ void test_vluxseg6ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_tu(
@@ -550,7 +550,7 @@ void test_vluxseg6ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_tum(
@@ -571,7 +571,7 @@ void test_vluxseg6ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_tum(
@@ -592,7 +592,7 @@ void test_vluxseg6ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_tum(
@@ -613,7 +613,7 @@ void test_vluxseg6ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vluxseg6ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_tum(
@@ -655,7 +655,7 @@ void test_vluxseg6ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_tum(
@@ -676,7 +676,7 @@ void test_vluxseg6ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_tum(
@@ -697,7 +697,7 @@ void test_vluxseg6ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_tum(
@@ -718,7 +718,7 @@ void test_vluxseg6ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vluxseg6ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_tum(
@@ -760,7 +760,7 @@ void test_vluxseg6ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_tum(
@@ -781,7 +781,7 @@ void test_vluxseg6ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_tum(
@@ -802,7 +802,7 @@ void test_vluxseg6ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_tum(
@@ -823,7 +823,7 @@ void test_vluxseg6ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vluxseg6ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_tum(
@@ -865,7 +865,7 @@ void test_vluxseg6ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_tum(
@@ -886,7 +886,7 @@ void test_vluxseg6ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_tum(
@@ -907,7 +907,7 @@ void test_vluxseg6ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_tum(
@@ -928,7 +928,7 @@ void test_vluxseg6ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vluxseg6ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_tum(
@@ -970,7 +970,7 @@ void test_vluxseg6ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_tum(
@@ -991,7 +991,7 @@ void test_vluxseg6ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_tum(
@@ -1012,7 +1012,7 @@ void test_vluxseg6ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_tum(
@@ -1033,7 +1033,7 @@ void test_vluxseg6ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg6ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_tum(
@@ -1075,7 +1075,7 @@ void test_vluxseg6ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_tum(
@@ -1096,7 +1096,7 @@ void test_vluxseg6ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_tumu(
@@ -1117,7 +1117,7 @@ void test_vluxseg6ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_tumu(
@@ -1138,7 +1138,7 @@ void test_vluxseg6ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vluxseg6ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_tumu(
@@ -1180,7 +1180,7 @@ void test_vluxseg6ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_tumu(
@@ -1201,7 +1201,7 @@ void test_vluxseg6ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_tumu(
@@ -1222,7 +1222,7 @@ void test_vluxseg6ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_tumu(
@@ -1243,7 +1243,7 @@ void test_vluxseg6ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vluxseg6ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_tumu(
@@ -1285,7 +1285,7 @@ void test_vluxseg6ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_tumu(
@@ -1306,7 +1306,7 @@ void test_vluxseg6ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_tumu(
@@ -1327,7 +1327,7 @@ void test_vluxseg6ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_tumu(
@@ -1348,7 +1348,7 @@ void test_vluxseg6ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg6ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_tumu(
@@ -1390,7 +1390,7 @@ void test_vluxseg6ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_tumu(
@@ -1411,7 +1411,7 @@ void test_vluxseg6ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_tumu(
@@ -1432,7 +1432,7 @@ void test_vluxseg6ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_tumu(
@@ -1453,7 +1453,7 @@ void test_vluxseg6ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_tumu(
@@ -1474,7 +1474,7 @@ void test_vluxseg6ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_tumu(
@@ -1495,7 +1495,7 @@ void test_vluxseg6ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_tumu(
@@ -1516,7 +1516,7 @@ void test_vluxseg6ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_tumu(
@@ -1537,7 +1537,7 @@ void test_vluxseg6ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_tumu(
@@ -1558,7 +1558,7 @@ void test_vluxseg6ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg6ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_tumu(
@@ -1600,7 +1600,7 @@ void test_vluxseg6ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_tumu(
@@ -1621,7 +1621,7 @@ void test_vluxseg6ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_tumu(
@@ -1642,7 +1642,7 @@ void test_vluxseg6ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_mu(
@@ -1663,7 +1663,7 @@ void test_vluxseg6ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_mu(
@@ -1684,7 +1684,7 @@ void test_vluxseg6ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_mu(
@@ -1705,7 +1705,7 @@ void test_vluxseg6ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_mu(
@@ -1726,7 +1726,7 @@ void test_vluxseg6ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_mu(
@@ -1747,7 +1747,7 @@ void test_vluxseg6ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_mu(
@@ -1768,7 +1768,7 @@ void test_vluxseg6ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_mu(
@@ -1789,7 +1789,7 @@ void test_vluxseg6ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_mu(
@@ -1810,7 +1810,7 @@ void test_vluxseg6ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_mu(
@@ -1831,7 +1831,7 @@ void test_vluxseg6ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_mu(
@@ -1852,7 +1852,7 @@ void test_vluxseg6ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_mu(
@@ -1873,7 +1873,7 @@ void test_vluxseg6ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_mu(
@@ -1894,7 +1894,7 @@ void test_vluxseg6ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vluxseg6ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_mu(
@@ -1936,7 +1936,7 @@ void test_vluxseg6ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_mu(
@@ -1957,7 +1957,7 @@ void test_vluxseg6ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_mu(
@@ -1978,7 +1978,7 @@ void test_vluxseg6ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_mu(
@@ -1999,7 +1999,7 @@ void test_vluxseg6ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_mu(
@@ -2020,7 +2020,7 @@ void test_vluxseg6ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_mu(
@@ -2041,7 +2041,7 @@ void test_vluxseg6ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_mu(
@@ -2062,7 +2062,7 @@ void test_vluxseg6ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_mu(
@@ -2083,7 +2083,7 @@ void test_vluxseg6ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg6ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_mu(
@@ -2125,7 +2125,7 @@ void test_vluxseg6ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_mu(
@@ -2146,7 +2146,7 @@ void test_vluxseg6ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_mu(
@@ -2167,7 +2167,7 @@ void test_vluxseg6ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_mu(
@@ -2188,6 +2188,6 @@ void test_vluxseg6ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c
index 80e26c3c91e2..55642dcb8475 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vluxseg6ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vluxseg6ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_tu(
@@ -88,7 +88,7 @@ void test_vluxseg6ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_tu(
@@ -109,7 +109,7 @@ void test_vluxseg6ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_tu(
@@ -130,7 +130,7 @@ void test_vluxseg6ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_tu(
@@ -151,7 +151,7 @@ void test_vluxseg6ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_tu(
@@ -172,7 +172,7 @@ void test_vluxseg6ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_tu(
@@ -193,7 +193,7 @@ void test_vluxseg6ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_tu(
@@ -214,7 +214,7 @@ void test_vluxseg6ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_tu(
@@ -235,7 +235,7 @@ void test_vluxseg6ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_tu(
@@ -256,7 +256,7 @@ void test_vluxseg6ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vluxseg6ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_tu(
@@ -298,7 +298,7 @@ void test_vluxseg6ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_tu(
@@ -319,7 +319,7 @@ void test_vluxseg6ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_tu(
@@ -340,7 +340,7 @@ void test_vluxseg6ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_tu(
@@ -361,7 +361,7 @@ void test_vluxseg6ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_tu(
@@ -382,7 +382,7 @@ void test_vluxseg6ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_tu(
@@ -403,7 +403,7 @@ void test_vluxseg6ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_tu(
@@ -424,7 +424,7 @@ void test_vluxseg6ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_tu(
@@ -445,7 +445,7 @@ void test_vluxseg6ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_tu(
@@ -466,7 +466,7 @@ void test_vluxseg6ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_tu(
@@ -487,7 +487,7 @@ void test_vluxseg6ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_tu(
@@ -508,7 +508,7 @@ void test_vluxseg6ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_tu(
@@ -529,7 +529,7 @@ void test_vluxseg6ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_tu(
@@ -550,7 +550,7 @@ void test_vluxseg6ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_tum(
@@ -571,7 +571,7 @@ void test_vluxseg6ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_tum(
@@ -592,7 +592,7 @@ void test_vluxseg6ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_tum(
@@ -613,7 +613,7 @@ void test_vluxseg6ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vluxseg6ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_tum(
@@ -655,7 +655,7 @@ void test_vluxseg6ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_tum(
@@ -676,7 +676,7 @@ void test_vluxseg6ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_tum(
@@ -697,7 +697,7 @@ void test_vluxseg6ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_tum(
@@ -718,7 +718,7 @@ void test_vluxseg6ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vluxseg6ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_tum(
@@ -760,7 +760,7 @@ void test_vluxseg6ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_tum(
@@ -781,7 +781,7 @@ void test_vluxseg6ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_tum(
@@ -802,7 +802,7 @@ void test_vluxseg6ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_tum(
@@ -823,7 +823,7 @@ void test_vluxseg6ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vluxseg6ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_tum(
@@ -865,7 +865,7 @@ void test_vluxseg6ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_tum(
@@ -886,7 +886,7 @@ void test_vluxseg6ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_tum(
@@ -907,7 +907,7 @@ void test_vluxseg6ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_tum(
@@ -928,7 +928,7 @@ void test_vluxseg6ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vluxseg6ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_tum(
@@ -970,7 +970,7 @@ void test_vluxseg6ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_tum(
@@ -991,7 +991,7 @@ void test_vluxseg6ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_tum(
@@ -1012,7 +1012,7 @@ void test_vluxseg6ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_tum(
@@ -1033,7 +1033,7 @@ void test_vluxseg6ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg6ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_tum(
@@ -1075,7 +1075,7 @@ void test_vluxseg6ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_tum(
@@ -1096,7 +1096,7 @@ void test_vluxseg6ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_tumu(
@@ -1117,7 +1117,7 @@ void test_vluxseg6ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_tumu(
@@ -1138,7 +1138,7 @@ void test_vluxseg6ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vluxseg6ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_tumu(
@@ -1180,7 +1180,7 @@ void test_vluxseg6ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_tumu(
@@ -1201,7 +1201,7 @@ void test_vluxseg6ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_tumu(
@@ -1222,7 +1222,7 @@ void test_vluxseg6ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_tumu(
@@ -1243,7 +1243,7 @@ void test_vluxseg6ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vluxseg6ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_tumu(
@@ -1285,7 +1285,7 @@ void test_vluxseg6ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_tumu(
@@ -1306,7 +1306,7 @@ void test_vluxseg6ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_tumu(
@@ -1327,7 +1327,7 @@ void test_vluxseg6ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_tumu(
@@ -1348,7 +1348,7 @@ void test_vluxseg6ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg6ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_tumu(
@@ -1390,7 +1390,7 @@ void test_vluxseg6ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_tumu(
@@ -1411,7 +1411,7 @@ void test_vluxseg6ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_tumu(
@@ -1432,7 +1432,7 @@ void test_vluxseg6ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_tumu(
@@ -1453,7 +1453,7 @@ void test_vluxseg6ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_tumu(
@@ -1474,7 +1474,7 @@ void test_vluxseg6ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_tumu(
@@ -1495,7 +1495,7 @@ void test_vluxseg6ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_tumu(
@@ -1516,7 +1516,7 @@ void test_vluxseg6ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_tumu(
@@ -1537,7 +1537,7 @@ void test_vluxseg6ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_tumu(
@@ -1558,7 +1558,7 @@ void test_vluxseg6ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg6ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_tumu(
@@ -1600,7 +1600,7 @@ void test_vluxseg6ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_tumu(
@@ -1621,7 +1621,7 @@ void test_vluxseg6ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_tumu(
@@ -1642,7 +1642,7 @@ void test_vluxseg6ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_mu(
@@ -1663,7 +1663,7 @@ void test_vluxseg6ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_mu(
@@ -1684,7 +1684,7 @@ void test_vluxseg6ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_mu(
@@ -1705,7 +1705,7 @@ void test_vluxseg6ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_mu(
@@ -1726,7 +1726,7 @@ void test_vluxseg6ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_mu(
@@ -1747,7 +1747,7 @@ void test_vluxseg6ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_mu(
@@ -1768,7 +1768,7 @@ void test_vluxseg6ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_mu(
@@ -1789,7 +1789,7 @@ void test_vluxseg6ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_mu(
@@ -1810,7 +1810,7 @@ void test_vluxseg6ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_mu(
@@ -1831,7 +1831,7 @@ void test_vluxseg6ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_mu(
@@ -1852,7 +1852,7 @@ void test_vluxseg6ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_mu(
@@ -1873,7 +1873,7 @@ void test_vluxseg6ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_mu(
@@ -1894,7 +1894,7 @@ void test_vluxseg6ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vluxseg6ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_mu(
@@ -1936,7 +1936,7 @@ void test_vluxseg6ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_mu(
@@ -1957,7 +1957,7 @@ void test_vluxseg6ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_mu(
@@ -1978,7 +1978,7 @@ void test_vluxseg6ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_mu(
@@ -1999,7 +1999,7 @@ void test_vluxseg6ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_mu(
@@ -2020,7 +2020,7 @@ void test_vluxseg6ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_mu(
@@ -2041,7 +2041,7 @@ void test_vluxseg6ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_mu(
@@ -2062,7 +2062,7 @@ void test_vluxseg6ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_mu(
@@ -2083,7 +2083,7 @@ void test_vluxseg6ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg6ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_mu(
@@ -2125,7 +2125,7 @@ void test_vluxseg6ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_mu(
@@ -2146,7 +2146,7 @@ void test_vluxseg6ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_mu(
@@ -2167,7 +2167,7 @@ void test_vluxseg6ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_mu(
@@ -2188,6 +2188,6 @@ void test_vluxseg6ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
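Every hunk in this file applies the same mechanical rename to the overloaded call site; as a minimal illustrative sketch (names taken from the tests above, with the full maskedoff parameter lists elided for brevity), each call changes as follows:

    /* Before: overloaded RVV intrinsic spelled without the prefix. */
    /*   return vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, ..., base, bindex, vl); */
    /* After: the same overloaded call with the __riscv_ prefix added. */
    /*   return __riscv_vluxseg6ei32_tu(v0, v1, v2, v3, v4, v5, ..., base, bindex, vl); */
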
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c
index ea84f1f57f41..71dc728ac4a8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vluxseg6ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vluxseg6ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_tu(
@@ -88,7 +88,7 @@ void test_vluxseg6ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_tu(
@@ -109,7 +109,7 @@ void test_vluxseg6ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_tu(
@@ -130,7 +130,7 @@ void test_vluxseg6ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_tu(
@@ -151,7 +151,7 @@ void test_vluxseg6ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_tu(
@@ -172,7 +172,7 @@ void test_vluxseg6ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_tu(
@@ -193,7 +193,7 @@ void test_vluxseg6ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_tu(
@@ -214,7 +214,7 @@ void test_vluxseg6ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_tu(
@@ -235,7 +235,7 @@ void test_vluxseg6ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_tu(
@@ -256,7 +256,7 @@ void test_vluxseg6ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vluxseg6ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_tu(
@@ -298,7 +298,7 @@ void test_vluxseg6ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_tu(
@@ -319,7 +319,7 @@ void test_vluxseg6ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_tu(
@@ -340,7 +340,7 @@ void test_vluxseg6ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_tu(
@@ -361,7 +361,7 @@ void test_vluxseg6ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_tu(
@@ -382,7 +382,7 @@ void test_vluxseg6ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_tu(
@@ -403,7 +403,7 @@ void test_vluxseg6ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_tu(
@@ -424,7 +424,7 @@ void test_vluxseg6ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_tu(
@@ -445,7 +445,7 @@ void test_vluxseg6ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_tu(
@@ -466,7 +466,7 @@ void test_vluxseg6ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_tu(
@@ -487,7 +487,7 @@ void test_vluxseg6ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_tu(
@@ -508,7 +508,7 @@ void test_vluxseg6ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_tu(
@@ -529,7 +529,7 @@ void test_vluxseg6ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_tu(
@@ -550,7 +550,7 @@ void test_vluxseg6ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_tum(
@@ -571,7 +571,7 @@ void test_vluxseg6ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_tum(
@@ -592,7 +592,7 @@ void test_vluxseg6ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_tum(
@@ -613,7 +613,7 @@ void test_vluxseg6ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vluxseg6ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_tum(
@@ -655,7 +655,7 @@ void test_vluxseg6ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_tum(
@@ -676,7 +676,7 @@ void test_vluxseg6ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_tum(
@@ -697,7 +697,7 @@ void test_vluxseg6ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_tum(
@@ -718,7 +718,7 @@ void test_vluxseg6ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vluxseg6ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_tum(
@@ -760,7 +760,7 @@ void test_vluxseg6ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_tum(
@@ -781,7 +781,7 @@ void test_vluxseg6ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_tum(
@@ -802,7 +802,7 @@ void test_vluxseg6ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_tum(
@@ -823,7 +823,7 @@ void test_vluxseg6ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vluxseg6ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_tum(
@@ -865,7 +865,7 @@ void test_vluxseg6ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_tum(
@@ -886,7 +886,7 @@ void test_vluxseg6ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_tum(
@@ -907,7 +907,7 @@ void test_vluxseg6ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_tum(
@@ -928,7 +928,7 @@ void test_vluxseg6ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vluxseg6ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_tum(
@@ -970,7 +970,7 @@ void test_vluxseg6ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_tum(
@@ -991,7 +991,7 @@ void test_vluxseg6ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_tum(
@@ -1012,7 +1012,7 @@ void test_vluxseg6ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_tum(
@@ -1033,7 +1033,7 @@ void test_vluxseg6ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg6ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_tum(
@@ -1075,7 +1075,7 @@ void test_vluxseg6ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_tum(
@@ -1096,7 +1096,7 @@ void test_vluxseg6ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_tumu(
@@ -1117,7 +1117,7 @@ void test_vluxseg6ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_tumu(
@@ -1138,7 +1138,7 @@ void test_vluxseg6ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vluxseg6ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_tumu(
@@ -1180,7 +1180,7 @@ void test_vluxseg6ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_tumu(
@@ -1201,7 +1201,7 @@ void test_vluxseg6ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_tumu(
@@ -1222,7 +1222,7 @@ void test_vluxseg6ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_tumu(
@@ -1243,7 +1243,7 @@ void test_vluxseg6ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vluxseg6ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_tumu(
@@ -1285,7 +1285,7 @@ void test_vluxseg6ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_tumu(
@@ -1306,7 +1306,7 @@ void test_vluxseg6ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_tumu(
@@ -1327,7 +1327,7 @@ void test_vluxseg6ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_tumu(
@@ -1348,7 +1348,7 @@ void test_vluxseg6ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg6ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_tumu(
@@ -1390,7 +1390,7 @@ void test_vluxseg6ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_tumu(
@@ -1411,7 +1411,7 @@ void test_vluxseg6ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_tumu(
@@ -1432,7 +1432,7 @@ void test_vluxseg6ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_tumu(
@@ -1453,7 +1453,7 @@ void test_vluxseg6ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_tumu(
@@ -1474,7 +1474,7 @@ void test_vluxseg6ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_tumu(
@@ -1495,7 +1495,7 @@ void test_vluxseg6ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_tumu(
@@ -1516,7 +1516,7 @@ void test_vluxseg6ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_tumu(
@@ -1537,7 +1537,7 @@ void test_vluxseg6ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_tumu(
@@ -1558,7 +1558,7 @@ void test_vluxseg6ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg6ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_tumu(
@@ -1600,7 +1600,7 @@ void test_vluxseg6ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_tumu(
@@ -1621,7 +1621,7 @@ void test_vluxseg6ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_tumu(
@@ -1642,7 +1642,7 @@ void test_vluxseg6ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_mu(
@@ -1663,7 +1663,7 @@ void test_vluxseg6ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_mu(
@@ -1684,7 +1684,7 @@ void test_vluxseg6ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_mu(
@@ -1705,7 +1705,7 @@ void test_vluxseg6ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_mu(
@@ -1726,7 +1726,7 @@ void test_vluxseg6ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_mu(
@@ -1747,7 +1747,7 @@ void test_vluxseg6ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_mu(
@@ -1768,7 +1768,7 @@ void test_vluxseg6ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_mu(
@@ -1789,7 +1789,7 @@ void test_vluxseg6ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_mu(
@@ -1810,7 +1810,7 @@ void test_vluxseg6ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_mu(
@@ -1831,7 +1831,7 @@ void test_vluxseg6ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_mu(
@@ -1852,7 +1852,7 @@ void test_vluxseg6ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_mu(
@@ -1873,7 +1873,7 @@ void test_vluxseg6ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_mu(
@@ -1894,7 +1894,7 @@ void test_vluxseg6ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vluxseg6ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_mu(
@@ -1936,7 +1936,7 @@ void test_vluxseg6ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_mu(
@@ -1957,7 +1957,7 @@ void test_vluxseg6ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_mu(
@@ -1978,7 +1978,7 @@ void test_vluxseg6ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_mu(
@@ -1999,7 +1999,7 @@ void test_vluxseg6ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_mu(
@@ -2020,7 +2020,7 @@ void test_vluxseg6ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_mu(
@@ -2041,7 +2041,7 @@ void test_vluxseg6ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_mu(
@@ -2062,7 +2062,7 @@ void test_vluxseg6ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_mu(
@@ -2083,7 +2083,7 @@ void test_vluxseg6ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg6ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_mu(
@@ -2125,7 +2125,7 @@ void test_vluxseg6ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_mu(
@@ -2146,7 +2146,7 @@ void test_vluxseg6ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_mu(
@@ -2167,7 +2167,7 @@ void test_vluxseg6ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_mu(
@@ -2188,6 +2188,6 @@ void test_vluxseg6ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c
index e07ae81e2cd1..2e24fb9bf64d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c
@@ -25,7 +25,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_tu(
@@ -46,7 +46,7 @@ void test_vluxseg6ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_tu(
@@ -67,7 +67,7 @@ void test_vluxseg6ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_tu(
@@ -88,7 +88,7 @@ void test_vluxseg6ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_tu(
@@ -109,7 +109,7 @@ void test_vluxseg6ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_tu(
@@ -130,7 +130,7 @@ void test_vluxseg6ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_tu(
@@ -151,7 +151,7 @@ void test_vluxseg6ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_tu(
@@ -172,7 +172,7 @@ void test_vluxseg6ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_tu(
@@ -193,7 +193,7 @@ void test_vluxseg6ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_tu(
@@ -214,7 +214,7 @@ void test_vluxseg6ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_tu(
@@ -235,7 +235,7 @@ void test_vluxseg6ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_tu(
@@ -256,7 +256,7 @@ void test_vluxseg6ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_tu(
@@ -277,7 +277,7 @@ void test_vluxseg6ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_tu(
@@ -298,7 +298,7 @@ void test_vluxseg6ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_tu(
@@ -319,7 +319,7 @@ void test_vluxseg6ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_tu(
@@ -340,7 +340,7 @@ void test_vluxseg6ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_tu(
@@ -361,7 +361,7 @@ void test_vluxseg6ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_tu(
@@ -382,7 +382,7 @@ void test_vluxseg6ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_tu(
@@ -403,7 +403,7 @@ void test_vluxseg6ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_tu(
@@ -424,7 +424,7 @@ void test_vluxseg6ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_tu(
@@ -445,7 +445,7 @@ void test_vluxseg6ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_tu(
@@ -466,7 +466,7 @@ void test_vluxseg6ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_tu(
@@ -487,7 +487,7 @@ void test_vluxseg6ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_tu(
@@ -508,7 +508,7 @@ void test_vluxseg6ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_tu(
@@ -529,7 +529,7 @@ void test_vluxseg6ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_tu(
@@ -550,7 +550,7 @@ void test_vluxseg6ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_tum(
@@ -571,7 +571,7 @@ void test_vluxseg6ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_tum(
@@ -592,7 +592,7 @@ void test_vluxseg6ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_tum(
@@ -613,7 +613,7 @@ void test_vluxseg6ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_tum(
@@ -634,7 +634,7 @@ void test_vluxseg6ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_tum(
@@ -655,7 +655,7 @@ void test_vluxseg6ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_tum(
@@ -676,7 +676,7 @@ void test_vluxseg6ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_tum(
@@ -697,7 +697,7 @@ void test_vluxseg6ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_tum(
@@ -718,7 +718,7 @@ void test_vluxseg6ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_tum(
@@ -739,7 +739,7 @@ void test_vluxseg6ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_tum(
@@ -760,7 +760,7 @@ void test_vluxseg6ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_tum(
@@ -781,7 +781,7 @@ void test_vluxseg6ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_tum(
@@ -802,7 +802,7 @@ void test_vluxseg6ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_tum(
@@ -823,7 +823,7 @@ void test_vluxseg6ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_tum(
@@ -844,7 +844,7 @@ void test_vluxseg6ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_tum(
@@ -865,7 +865,7 @@ void test_vluxseg6ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_tum(
@@ -886,7 +886,7 @@ void test_vluxseg6ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_tum(
@@ -907,7 +907,7 @@ void test_vluxseg6ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_tum(
@@ -928,7 +928,7 @@ void test_vluxseg6ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_tum(
@@ -949,7 +949,7 @@ void test_vluxseg6ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_tum(
@@ -970,7 +970,7 @@ void test_vluxseg6ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_tum(
@@ -991,7 +991,7 @@ void test_vluxseg6ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_tum(
@@ -1012,7 +1012,7 @@ void test_vluxseg6ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_tum(
@@ -1033,7 +1033,7 @@ void test_vluxseg6ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg6ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_tum(
@@ -1075,7 +1075,7 @@ void test_vluxseg6ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_tum(
@@ -1096,7 +1096,7 @@ void test_vluxseg6ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_tumu(
@@ -1117,7 +1117,7 @@ void test_vluxseg6ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_tumu(
@@ -1138,7 +1138,7 @@ void test_vluxseg6ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_tumu(
@@ -1159,7 +1159,7 @@ void test_vluxseg6ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_tumu(
@@ -1180,7 +1180,7 @@ void test_vluxseg6ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_tumu(
@@ -1201,7 +1201,7 @@ void test_vluxseg6ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_tumu(
@@ -1222,7 +1222,7 @@ void test_vluxseg6ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_tumu(
@@ -1243,7 +1243,7 @@ void test_vluxseg6ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_tumu(
@@ -1264,7 +1264,7 @@ void test_vluxseg6ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_tumu(
@@ -1285,7 +1285,7 @@ void test_vluxseg6ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_tumu(
@@ -1306,7 +1306,7 @@ void test_vluxseg6ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_tumu(
@@ -1327,7 +1327,7 @@ void test_vluxseg6ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_tumu(
@@ -1348,7 +1348,7 @@ void test_vluxseg6ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_tumu(
@@ -1369,7 +1369,7 @@ void test_vluxseg6ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_tumu(
@@ -1390,7 +1390,7 @@ void test_vluxseg6ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_tumu(
@@ -1411,7 +1411,7 @@ void test_vluxseg6ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_tumu(
@@ -1432,7 +1432,7 @@ void test_vluxseg6ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_tumu(
@@ -1453,7 +1453,7 @@ void test_vluxseg6ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_tumu(
@@ -1474,7 +1474,7 @@ void test_vluxseg6ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_tumu(
@@ -1495,7 +1495,7 @@ void test_vluxseg6ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_tumu(
@@ -1516,7 +1516,7 @@ void test_vluxseg6ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_tumu(
@@ -1537,7 +1537,7 @@ void test_vluxseg6ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_tumu(
@@ -1558,7 +1558,7 @@ void test_vluxseg6ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg6ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_tumu(
@@ -1600,7 +1600,7 @@ void test_vluxseg6ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_tumu(
@@ -1621,7 +1621,7 @@ void test_vluxseg6ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_tumu(
@@ -1642,7 +1642,7 @@ void test_vluxseg6ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_mu(
@@ -1663,7 +1663,7 @@ void test_vluxseg6ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_mu(
@@ -1684,7 +1684,7 @@ void test_vluxseg6ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_mu(
@@ -1705,7 +1705,7 @@ void test_vluxseg6ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_mu(
@@ -1726,7 +1726,7 @@ void test_vluxseg6ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_mu(
@@ -1747,7 +1747,7 @@ void test_vluxseg6ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_mu(
@@ -1768,7 +1768,7 @@ void test_vluxseg6ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_mu(
@@ -1789,7 +1789,7 @@ void test_vluxseg6ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_mu(
@@ -1810,7 +1810,7 @@ void test_vluxseg6ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_mu(
@@ -1831,7 +1831,7 @@ void test_vluxseg6ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_mu(
@@ -1852,7 +1852,7 @@ void test_vluxseg6ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_mu(
@@ -1873,7 +1873,7 @@ void test_vluxseg6ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_mu(
@@ -1894,7 +1894,7 @@ void test_vluxseg6ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_mu(
@@ -1915,7 +1915,7 @@ void test_vluxseg6ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_mu(
@@ -1936,7 +1936,7 @@ void test_vluxseg6ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_mu(
@@ -1957,7 +1957,7 @@ void test_vluxseg6ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_mu(
@@ -1978,7 +1978,7 @@ void test_vluxseg6ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_mu(
@@ -1999,7 +1999,7 @@ void test_vluxseg6ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_mu(
@@ -2020,7 +2020,7 @@ void test_vluxseg6ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_mu(
@@ -2041,7 +2041,7 @@ void test_vluxseg6ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_mu(
@@ -2062,7 +2062,7 @@ void test_vluxseg6ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_mu(
@@ -2083,7 +2083,7 @@ void test_vluxseg6ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg6ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_mu(
@@ -2125,7 +2125,7 @@ void test_vluxseg6ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_mu(
@@ -2146,7 +2146,7 @@ void test_vluxseg6ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_mu(
@@ -2167,7 +2167,7 @@ void test_vluxseg6ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_mu(
@@ -2188,6 +2188,6 @@ void test_vluxseg6ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg6ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+ return __riscv_vluxseg6ei8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
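The hunks above are mechanical renames inside autogenerated tests; for a reader skimming the diff, one standalone translation unit showing the before/after shape of a single call may be easier to digest. The sketch below is not part of the patch: load_segments is a hypothetical wrapper, its signature is copied from test_vluxseg6ei8_v_i8mf2_tum above, and it assumes a clang recent enough to carry this patch (so the __riscv_-prefixed overloaded names exist) together with the v0.11-era segment-load API these tests exercise, compiled with something like `clang --target=riscv64 -march=rv64gcv -c`.

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// Hypothetical caller mirroring test_vluxseg6ei8_v_i8mf2_tum above.
// Before this patch the overloaded spelling was vluxseg6ei8_tum(...);
// after it, the same call is written __riscv_vluxseg6ei8_tum(...).
void load_segments(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
                   vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5,
                   vbool16_t mask,
                   vint8mf2_t maskedoff0, vint8mf2_t maskedoff1,
                   vint8mf2_t maskedoff2, vint8mf2_t maskedoff3,
                   vint8mf2_t maskedoff4, vint8mf2_t maskedoff5,
                   const int8_t *base, vuint8mf2_t bindex, size_t vl) {
  // Overload resolution selects the i8mf2 _tum variant from the operand
  // types; the fully suffixed equivalent would be
  // __riscv_vluxseg6ei8_v_i8mf2_tum.
  __riscv_vluxseg6ei8_tum(v0, v1, v2, v3, v4, v5, mask,
                          maskedoff0, maskedoff1, maskedoff2,
                          maskedoff3, maskedoff4, maskedoff5,
                          base, bindex, vl);
}
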
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c
index 1ab248a2542b..03dd058d84b6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vluxseg7ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vluxseg7ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_tu(
@@ -96,7 +96,7 @@ void test_vluxseg7ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_tu(
@@ -119,7 +119,7 @@ void test_vluxseg7ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_tu(
@@ -142,7 +142,7 @@ void test_vluxseg7ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_tu(
@@ -165,7 +165,7 @@ void test_vluxseg7ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_tu(
@@ -188,7 +188,7 @@ void test_vluxseg7ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_tu(
@@ -211,7 +211,7 @@ void test_vluxseg7ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_tu(
@@ -234,7 +234,7 @@ void test_vluxseg7ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_tu(
@@ -257,7 +257,7 @@ void test_vluxseg7ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_tu(
@@ -280,7 +280,7 @@ void test_vluxseg7ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_tu(
@@ -303,7 +303,7 @@ void test_vluxseg7ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_tu(
@@ -326,7 +326,7 @@ void test_vluxseg7ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_tu(
@@ -349,7 +349,7 @@ void test_vluxseg7ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_tu(
@@ -372,7 +372,7 @@ void test_vluxseg7ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vluxseg7ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_tu(
@@ -418,7 +418,7 @@ void test_vluxseg7ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_tu(
@@ -441,7 +441,7 @@ void test_vluxseg7ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_tu(
@@ -464,7 +464,7 @@ void test_vluxseg7ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_tu(
@@ -487,7 +487,7 @@ void test_vluxseg7ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_tu(
@@ -510,7 +510,7 @@ void test_vluxseg7ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_tu(
@@ -533,7 +533,7 @@ void test_vluxseg7ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_tu(
@@ -556,7 +556,7 @@ void test_vluxseg7ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_tu(
@@ -579,7 +579,7 @@ void test_vluxseg7ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vluxseg7ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_tum(
@@ -625,7 +625,7 @@ void test_vluxseg7ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_tum(
@@ -648,7 +648,7 @@ void test_vluxseg7ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_tum(
@@ -671,7 +671,7 @@ void test_vluxseg7ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_tum(
@@ -694,7 +694,7 @@ void test_vluxseg7ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_tum(
@@ -717,7 +717,7 @@ void test_vluxseg7ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_tum(
@@ -740,7 +740,7 @@ void test_vluxseg7ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_tum(
@@ -763,7 +763,7 @@ void test_vluxseg7ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vluxseg7ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_tum(
@@ -809,7 +809,7 @@ void test_vluxseg7ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_tum(
@@ -832,7 +832,7 @@ void test_vluxseg7ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_tum(
@@ -855,7 +855,7 @@ void test_vluxseg7ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_tum(
@@ -878,7 +878,7 @@ void test_vluxseg7ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vluxseg7ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_tum(
@@ -924,7 +924,7 @@ void test_vluxseg7ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_tum(
@@ -947,7 +947,7 @@ void test_vluxseg7ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_tum(
@@ -970,7 +970,7 @@ void test_vluxseg7ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_tum(
@@ -993,7 +993,7 @@ void test_vluxseg7ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_tum(
@@ -1016,7 +1016,7 @@ void test_vluxseg7ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_tum(
@@ -1039,7 +1039,7 @@ void test_vluxseg7ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_tum(
@@ -1062,7 +1062,7 @@ void test_vluxseg7ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_tum(
@@ -1085,7 +1085,7 @@ void test_vluxseg7ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_tum(
@@ -1108,7 +1108,7 @@ void test_vluxseg7ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_tum(
@@ -1131,7 +1131,7 @@ void test_vluxseg7ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_tum(
@@ -1154,7 +1154,7 @@ void test_vluxseg7ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_tum(
@@ -1177,7 +1177,7 @@ void test_vluxseg7ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_tum(
@@ -1200,7 +1200,7 @@ void test_vluxseg7ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_tumu(
@@ -1223,7 +1223,7 @@ void test_vluxseg7ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_tumu(
@@ -1246,7 +1246,7 @@ void test_vluxseg7ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_tumu(
@@ -1269,7 +1269,7 @@ void test_vluxseg7ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_tumu(
@@ -1292,7 +1292,7 @@ void test_vluxseg7ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_tumu(
@@ -1315,7 +1315,7 @@ void test_vluxseg7ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_tumu(
@@ -1338,7 +1338,7 @@ void test_vluxseg7ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_tumu(
@@ -1361,7 +1361,7 @@ void test_vluxseg7ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_tumu(
@@ -1384,7 +1384,7 @@ void test_vluxseg7ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_tumu(
@@ -1407,7 +1407,7 @@ void test_vluxseg7ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_tumu(
@@ -1430,7 +1430,7 @@ void test_vluxseg7ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_tumu(
@@ -1453,7 +1453,7 @@ void test_vluxseg7ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_tumu(
@@ -1476,7 +1476,7 @@ void test_vluxseg7ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vluxseg7ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_tumu(
@@ -1522,7 +1522,7 @@ void test_vluxseg7ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_tumu(
@@ -1545,7 +1545,7 @@ void test_vluxseg7ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_tumu(
@@ -1568,7 +1568,7 @@ void test_vluxseg7ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_tumu(
@@ -1591,7 +1591,7 @@ void test_vluxseg7ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_tumu(
@@ -1614,7 +1614,7 @@ void test_vluxseg7ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_tumu(
@@ -1637,7 +1637,7 @@ void test_vluxseg7ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_tumu(
@@ -1660,7 +1660,7 @@ void test_vluxseg7ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_tumu(
@@ -1683,7 +1683,7 @@ void test_vluxseg7ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_tumu(
@@ -1706,7 +1706,7 @@ void test_vluxseg7ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_tumu(
@@ -1729,7 +1729,7 @@ void test_vluxseg7ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_tumu(
@@ -1752,7 +1752,7 @@ void test_vluxseg7ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_tumu(
@@ -1775,7 +1775,7 @@ void test_vluxseg7ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_tumu(
@@ -1798,7 +1798,7 @@ void test_vluxseg7ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_mu(
@@ -1821,7 +1821,7 @@ void test_vluxseg7ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_mu(
@@ -1844,7 +1844,7 @@ void test_vluxseg7ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_mu(
@@ -1867,7 +1867,7 @@ void test_vluxseg7ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_mu(
@@ -1890,7 +1890,7 @@ void test_vluxseg7ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_mu(
@@ -1913,7 +1913,7 @@ void test_vluxseg7ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_mu(
@@ -1936,7 +1936,7 @@ void test_vluxseg7ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vluxseg7ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_mu(
@@ -1982,7 +1982,7 @@ void test_vluxseg7ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_mu(
@@ -2005,7 +2005,7 @@ void test_vluxseg7ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_mu(
@@ -2028,7 +2028,7 @@ void test_vluxseg7ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_mu(
@@ -2051,7 +2051,7 @@ void test_vluxseg7ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_mu(
@@ -2074,7 +2074,7 @@ void test_vluxseg7ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_mu(
@@ -2097,7 +2097,7 @@ void test_vluxseg7ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_mu(
@@ -2120,7 +2120,7 @@ void test_vluxseg7ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_mu(
@@ -2143,7 +2143,7 @@ void test_vluxseg7ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_mu(
@@ -2166,7 +2166,7 @@ void test_vluxseg7ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_mu(
@@ -2189,7 +2189,7 @@ void test_vluxseg7ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_mu(
@@ -2212,7 +2212,7 @@ void test_vluxseg7ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_mu(
@@ -2235,7 +2235,7 @@ void test_vluxseg7ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_mu(
@@ -2258,7 +2258,7 @@ void test_vluxseg7ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_mu(
@@ -2281,7 +2281,7 @@ void test_vluxseg7ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vluxseg7ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_mu(
@@ -2327,7 +2327,7 @@ void test_vluxseg7ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_mu(
@@ -2350,7 +2350,7 @@ void test_vluxseg7ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_mu(
@@ -2373,7 +2373,7 @@ void test_vluxseg7ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_mu(
@@ -2396,6 +2396,6 @@ void test_vluxseg7ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
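The rename applied throughout these autogenerated tests is purely mechanical: each overloaded call gains the `__riscv_` prefix while its argument list stays unchanged. A minimal standalone sketch of the new spelling, using the much shorter overloaded vadd form rather than a seven-field segment load (the wrapper function below is illustrative, not part of this diff):

#include <riscv_vector.h>

// Hypothetical caller demonstrating a prefixed overloaded intrinsic.
// Assumes a toolchain recent enough to provide the __riscv_ names;
// build with, e.g., clang -march=rv64gcv.
vint32m1_t add_i32m1(vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vadd(a, b, vl);  // previously spelled vadd(a, b, vl)
}

The same substitution, with the policy suffix (_tu, _tum, _tumu, _mu) and all operands untouched, accounts for every +/- pair in the hunks above and below.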
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c
index 160222ba2eba..b39d75c3fffb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vluxseg7ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vluxseg7ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_tu(
@@ -96,7 +96,7 @@ void test_vluxseg7ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_tu(
@@ -119,7 +119,7 @@ void test_vluxseg7ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_tu(
@@ -142,7 +142,7 @@ void test_vluxseg7ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_tu(
@@ -165,7 +165,7 @@ void test_vluxseg7ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_tu(
@@ -188,7 +188,7 @@ void test_vluxseg7ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_tu(
@@ -211,7 +211,7 @@ void test_vluxseg7ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_tu(
@@ -234,7 +234,7 @@ void test_vluxseg7ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_tu(
@@ -257,7 +257,7 @@ void test_vluxseg7ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_tu(
@@ -280,7 +280,7 @@ void test_vluxseg7ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_tu(
@@ -303,7 +303,7 @@ void test_vluxseg7ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_tu(
@@ -326,7 +326,7 @@ void test_vluxseg7ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_tu(
@@ -349,7 +349,7 @@ void test_vluxseg7ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_tu(
@@ -372,7 +372,7 @@ void test_vluxseg7ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vluxseg7ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_tu(
@@ -418,7 +418,7 @@ void test_vluxseg7ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_tu(
@@ -441,7 +441,7 @@ void test_vluxseg7ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_tu(
@@ -464,7 +464,7 @@ void test_vluxseg7ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_tu(
@@ -487,7 +487,7 @@ void test_vluxseg7ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_tu(
@@ -510,7 +510,7 @@ void test_vluxseg7ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_tu(
@@ -533,7 +533,7 @@ void test_vluxseg7ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_tu(
@@ -556,7 +556,7 @@ void test_vluxseg7ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_tu(
@@ -579,7 +579,7 @@ void test_vluxseg7ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vluxseg7ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_tum(
@@ -625,7 +625,7 @@ void test_vluxseg7ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_tum(
@@ -648,7 +648,7 @@ void test_vluxseg7ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_tum(
@@ -671,7 +671,7 @@ void test_vluxseg7ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_tum(
@@ -694,7 +694,7 @@ void test_vluxseg7ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_tum(
@@ -717,7 +717,7 @@ void test_vluxseg7ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_tum(
@@ -740,7 +740,7 @@ void test_vluxseg7ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_tum(
@@ -763,7 +763,7 @@ void test_vluxseg7ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vluxseg7ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_tum(
@@ -809,7 +809,7 @@ void test_vluxseg7ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_tum(
@@ -832,7 +832,7 @@ void test_vluxseg7ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_tum(
@@ -855,7 +855,7 @@ void test_vluxseg7ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_tum(
@@ -878,7 +878,7 @@ void test_vluxseg7ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vluxseg7ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_tum(
@@ -924,7 +924,7 @@ void test_vluxseg7ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_tum(
@@ -947,7 +947,7 @@ void test_vluxseg7ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_tum(
@@ -970,7 +970,7 @@ void test_vluxseg7ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_tum(
@@ -993,7 +993,7 @@ void test_vluxseg7ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_tum(
@@ -1016,7 +1016,7 @@ void test_vluxseg7ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_tum(
@@ -1039,7 +1039,7 @@ void test_vluxseg7ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_tum(
@@ -1062,7 +1062,7 @@ void test_vluxseg7ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_tum(
@@ -1085,7 +1085,7 @@ void test_vluxseg7ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_tum(
@@ -1108,7 +1108,7 @@ void test_vluxseg7ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_tum(
@@ -1131,7 +1131,7 @@ void test_vluxseg7ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_tum(
@@ -1154,7 +1154,7 @@ void test_vluxseg7ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_tum(
@@ -1177,7 +1177,7 @@ void test_vluxseg7ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_tum(
@@ -1200,7 +1200,7 @@ void test_vluxseg7ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_tumu(
@@ -1223,7 +1223,7 @@ void test_vluxseg7ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_tumu(
@@ -1246,7 +1246,7 @@ void test_vluxseg7ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_tumu(
@@ -1269,7 +1269,7 @@ void test_vluxseg7ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_tumu(
@@ -1292,7 +1292,7 @@ void test_vluxseg7ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_tumu(
@@ -1315,7 +1315,7 @@ void test_vluxseg7ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_tumu(
@@ -1338,7 +1338,7 @@ void test_vluxseg7ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_tumu(
@@ -1361,7 +1361,7 @@ void test_vluxseg7ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_tumu(
@@ -1384,7 +1384,7 @@ void test_vluxseg7ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_tumu(
@@ -1407,7 +1407,7 @@ void test_vluxseg7ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_tumu(
@@ -1430,7 +1430,7 @@ void test_vluxseg7ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_tumu(
@@ -1453,7 +1453,7 @@ void test_vluxseg7ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_tumu(
@@ -1476,7 +1476,7 @@ void test_vluxseg7ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vluxseg7ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_tumu(
@@ -1522,7 +1522,7 @@ void test_vluxseg7ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_tumu(
@@ -1545,7 +1545,7 @@ void test_vluxseg7ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_tumu(
@@ -1568,7 +1568,7 @@ void test_vluxseg7ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_tumu(
@@ -1591,7 +1591,7 @@ void test_vluxseg7ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_tumu(
@@ -1614,7 +1614,7 @@ void test_vluxseg7ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_tumu(
@@ -1637,7 +1637,7 @@ void test_vluxseg7ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_tumu(
@@ -1660,7 +1660,7 @@ void test_vluxseg7ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_tumu(
@@ -1683,7 +1683,7 @@ void test_vluxseg7ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_tumu(
@@ -1706,7 +1706,7 @@ void test_vluxseg7ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_tumu(
@@ -1729,7 +1729,7 @@ void test_vluxseg7ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_tumu(
@@ -1752,7 +1752,7 @@ void test_vluxseg7ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_tumu(
@@ -1775,7 +1775,7 @@ void test_vluxseg7ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_tumu(
@@ -1798,7 +1798,7 @@ void test_vluxseg7ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_mu(
@@ -1821,7 +1821,7 @@ void test_vluxseg7ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_mu(
@@ -1844,7 +1844,7 @@ void test_vluxseg7ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_mu(
@@ -1867,7 +1867,7 @@ void test_vluxseg7ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_mu(
@@ -1890,7 +1890,7 @@ void test_vluxseg7ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_mu(
@@ -1913,7 +1913,7 @@ void test_vluxseg7ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_mu(
@@ -1936,7 +1936,7 @@ void test_vluxseg7ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vluxseg7ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_mu(
@@ -1982,7 +1982,7 @@ void test_vluxseg7ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_mu(
@@ -2005,7 +2005,7 @@ void test_vluxseg7ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_mu(
@@ -2028,7 +2028,7 @@ void test_vluxseg7ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_mu(
@@ -2051,7 +2051,7 @@ void test_vluxseg7ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_mu(
@@ -2074,7 +2074,7 @@ void test_vluxseg7ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_mu(
@@ -2097,7 +2097,7 @@ void test_vluxseg7ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_mu(
@@ -2120,7 +2120,7 @@ void test_vluxseg7ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_mu(
@@ -2143,7 +2143,7 @@ void test_vluxseg7ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_mu(
@@ -2166,7 +2166,7 @@ void test_vluxseg7ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_mu(
@@ -2189,7 +2189,7 @@ void test_vluxseg7ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_mu(
@@ -2212,7 +2212,7 @@ void test_vluxseg7ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_mu(
@@ -2235,7 +2235,7 @@ void test_vluxseg7ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_mu(
@@ -2258,7 +2258,7 @@ void test_vluxseg7ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_mu(
@@ -2281,7 +2281,7 @@ void test_vluxseg7ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vluxseg7ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_mu(
@@ -2327,7 +2327,7 @@ void test_vluxseg7ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_mu(
@@ -2350,7 +2350,7 @@ void test_vluxseg7ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_mu(
@@ -2373,7 +2373,7 @@ void test_vluxseg7ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_mu(
@@ -2396,6 +2396,6 @@ void test_vluxseg7ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
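For reference, the caller-facing shape of this rename in the overloaded segment-load tests is the minimal sketch below — assuming a clang that carries this patch, the V extension enabled (e.g. -march=rv64gcv), and <riscv_vector.h>; the wrapper name load_f32mf2_x7_tu is illustrative only, while the parameter list and the intrinsic call mirror the autogenerated test_vluxseg7ei64_v_f32mf2_tu case shown further down:

#include <riscv_vector.h>

/* Overloaded, tail-undisturbed (_tu) 7-field indexed segment load.
   The seven maskedoff operands seed v0..v6, so destination elements
   past vl are left undisturbed; bindex supplies the 64-bit indices. */
void load_f32mf2_x7_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1,
                       vfloat32mf2_t *v2, vfloat32mf2_t *v3,
                       vfloat32mf2_t *v4, vfloat32mf2_t *v5,
                       vfloat32mf2_t *v6,
                       vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1,
                       vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3,
                       vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5,
                       vfloat32mf2_t maskedoff6,
                       const float *base, vuint64m1_t bindex, size_t vl) {
  /* Spelled vluxseg7ei64_tu(...) before this patch; only the __riscv_
     prefix is new, the overload resolution itself is unchanged. */
  __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6,
                          maskedoff0, maskedoff1, maskedoff2, maskedoff3,
                          maskedoff4, maskedoff5, maskedoff6,
                          base, bindex, vl);
}
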
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c
index 8b7b2fa51635..d5efa1053336 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vluxseg7ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vluxseg7ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_tu(
@@ -96,7 +96,7 @@ void test_vluxseg7ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_tu(
@@ -119,7 +119,7 @@ void test_vluxseg7ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_tu(
@@ -142,7 +142,7 @@ void test_vluxseg7ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_tu(
@@ -165,7 +165,7 @@ void test_vluxseg7ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_tu(
@@ -188,7 +188,7 @@ void test_vluxseg7ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_tu(
@@ -211,7 +211,7 @@ void test_vluxseg7ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_tu(
@@ -234,7 +234,7 @@ void test_vluxseg7ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_tu(
@@ -257,7 +257,7 @@ void test_vluxseg7ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_tu(
@@ -280,7 +280,7 @@ void test_vluxseg7ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_tu(
@@ -303,7 +303,7 @@ void test_vluxseg7ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_tu(
@@ -326,7 +326,7 @@ void test_vluxseg7ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_tu(
@@ -349,7 +349,7 @@ void test_vluxseg7ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_tu(
@@ -372,7 +372,7 @@ void test_vluxseg7ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vluxseg7ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_tu(
@@ -418,7 +418,7 @@ void test_vluxseg7ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_tu(
@@ -441,7 +441,7 @@ void test_vluxseg7ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_tu(
@@ -464,7 +464,7 @@ void test_vluxseg7ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_tu(
@@ -487,7 +487,7 @@ void test_vluxseg7ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_tu(
@@ -510,7 +510,7 @@ void test_vluxseg7ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_tu(
@@ -533,7 +533,7 @@ void test_vluxseg7ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_tu(
@@ -556,7 +556,7 @@ void test_vluxseg7ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_tu(
@@ -579,7 +579,7 @@ void test_vluxseg7ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vluxseg7ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_tum(
@@ -625,7 +625,7 @@ void test_vluxseg7ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_tum(
@@ -648,7 +648,7 @@ void test_vluxseg7ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_tum(
@@ -671,7 +671,7 @@ void test_vluxseg7ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_tum(
@@ -694,7 +694,7 @@ void test_vluxseg7ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_tum(
@@ -717,7 +717,7 @@ void test_vluxseg7ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_tum(
@@ -740,7 +740,7 @@ void test_vluxseg7ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_tum(
@@ -763,7 +763,7 @@ void test_vluxseg7ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vluxseg7ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_tum(
@@ -809,7 +809,7 @@ void test_vluxseg7ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_tum(
@@ -832,7 +832,7 @@ void test_vluxseg7ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_tum(
@@ -855,7 +855,7 @@ void test_vluxseg7ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_tum(
@@ -878,7 +878,7 @@ void test_vluxseg7ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vluxseg7ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_tum(
@@ -924,7 +924,7 @@ void test_vluxseg7ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_tum(
@@ -947,7 +947,7 @@ void test_vluxseg7ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_tum(
@@ -970,7 +970,7 @@ void test_vluxseg7ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_tum(
@@ -993,7 +993,7 @@ void test_vluxseg7ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_tum(
@@ -1016,7 +1016,7 @@ void test_vluxseg7ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_tum(
@@ -1039,7 +1039,7 @@ void test_vluxseg7ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_tum(
@@ -1062,7 +1062,7 @@ void test_vluxseg7ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_tum(
@@ -1085,7 +1085,7 @@ void test_vluxseg7ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_tum(
@@ -1108,7 +1108,7 @@ void test_vluxseg7ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_tum(
@@ -1131,7 +1131,7 @@ void test_vluxseg7ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_tum(
@@ -1154,7 +1154,7 @@ void test_vluxseg7ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_tum(
@@ -1177,7 +1177,7 @@ void test_vluxseg7ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_tum(
@@ -1200,7 +1200,7 @@ void test_vluxseg7ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_tumu(
@@ -1223,7 +1223,7 @@ void test_vluxseg7ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_tumu(
@@ -1246,7 +1246,7 @@ void test_vluxseg7ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_tumu(
@@ -1269,7 +1269,7 @@ void test_vluxseg7ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_tumu(
@@ -1292,7 +1292,7 @@ void test_vluxseg7ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_tumu(
@@ -1315,7 +1315,7 @@ void test_vluxseg7ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_tumu(
@@ -1338,7 +1338,7 @@ void test_vluxseg7ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_tumu(
@@ -1361,7 +1361,7 @@ void test_vluxseg7ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_tumu(
@@ -1384,7 +1384,7 @@ void test_vluxseg7ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_tumu(
@@ -1407,7 +1407,7 @@ void test_vluxseg7ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_tumu(
@@ -1430,7 +1430,7 @@ void test_vluxseg7ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_tumu(
@@ -1453,7 +1453,7 @@ void test_vluxseg7ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_tumu(
@@ -1476,7 +1476,7 @@ void test_vluxseg7ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vluxseg7ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_tumu(
@@ -1522,7 +1522,7 @@ void test_vluxseg7ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_tumu(
@@ -1545,7 +1545,7 @@ void test_vluxseg7ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_tumu(
@@ -1568,7 +1568,7 @@ void test_vluxseg7ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_tumu(
@@ -1591,7 +1591,7 @@ void test_vluxseg7ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_tumu(
@@ -1614,7 +1614,7 @@ void test_vluxseg7ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_tumu(
@@ -1637,7 +1637,7 @@ void test_vluxseg7ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_tumu(
@@ -1660,7 +1660,7 @@ void test_vluxseg7ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_tumu(
@@ -1683,7 +1683,7 @@ void test_vluxseg7ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_tumu(
@@ -1706,7 +1706,7 @@ void test_vluxseg7ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_tumu(
@@ -1729,7 +1729,7 @@ void test_vluxseg7ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_tumu(
@@ -1752,7 +1752,7 @@ void test_vluxseg7ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_tumu(
@@ -1775,7 +1775,7 @@ void test_vluxseg7ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_tumu(
@@ -1798,7 +1798,7 @@ void test_vluxseg7ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_mu(
@@ -1821,7 +1821,7 @@ void test_vluxseg7ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_mu(
@@ -1844,7 +1844,7 @@ void test_vluxseg7ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_mu(
@@ -1867,7 +1867,7 @@ void test_vluxseg7ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_mu(
@@ -1890,7 +1890,7 @@ void test_vluxseg7ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_mu(
@@ -1913,7 +1913,7 @@ void test_vluxseg7ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_mu(
@@ -1936,7 +1936,7 @@ void test_vluxseg7ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vluxseg7ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_mu(
@@ -1982,7 +1982,7 @@ void test_vluxseg7ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_mu(
@@ -2005,7 +2005,7 @@ void test_vluxseg7ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_mu(
@@ -2028,7 +2028,7 @@ void test_vluxseg7ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_mu(
@@ -2051,7 +2051,7 @@ void test_vluxseg7ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_mu(
@@ -2074,7 +2074,7 @@ void test_vluxseg7ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_mu(
@@ -2097,7 +2097,7 @@ void test_vluxseg7ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_mu(
@@ -2120,7 +2120,7 @@ void test_vluxseg7ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_mu(
@@ -2143,7 +2143,7 @@ void test_vluxseg7ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_mu(
@@ -2166,7 +2166,7 @@ void test_vluxseg7ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_mu(
@@ -2189,7 +2189,7 @@ void test_vluxseg7ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_mu(
@@ -2212,7 +2212,7 @@ void test_vluxseg7ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_mu(
@@ -2235,7 +2235,7 @@ void test_vluxseg7ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_mu(
@@ -2258,7 +2258,7 @@ void test_vluxseg7ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_mu(
@@ -2281,7 +2281,7 @@ void test_vluxseg7ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vluxseg7ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_mu(
@@ -2327,7 +2327,7 @@ void test_vluxseg7ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_mu(
@@ -2350,7 +2350,7 @@ void test_vluxseg7ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_mu(
@@ -2373,7 +2373,7 @@ void test_vluxseg7ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_mu(
@@ -2396,6 +2396,6 @@ void test_vluxseg7ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
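The hunks above are mechanical: every overloaded intrinsic call keeps its argument list unchanged and only gains the __riscv_ prefix. A minimal user-side sketch of the renaming follows; it is not part of this diff, the function name add_prefixed is illustrative, and it assumes a compiler whose <riscv_vector.h> ships the prefixed overloaded intrinsics.

#include <riscv_vector.h>

/* Overloaded, unmasked vector-vector add. Before this patch the call
   was spelled vadd(a, b, vl); after it, __riscv_vadd(a, b, vl). */
vint32m1_t add_prefixed(vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vadd(a, b, vl);
}

The policy suffixes exercised by these tests (_tu, _tum, _tumu, _mu) select the tail/mask policy variant and are preserved verbatim; only the prefix is new.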
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c
index a0aeaf91541d..aed72f2a1ccc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c
@@ -27,7 +27,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_tu(
@@ -50,7 +50,7 @@ void test_vluxseg7ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_tu(
@@ -73,7 +73,7 @@ void test_vluxseg7ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_tu(
@@ -96,7 +96,7 @@ void test_vluxseg7ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_tu(
@@ -119,7 +119,7 @@ void test_vluxseg7ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_tu(
@@ -142,7 +142,7 @@ void test_vluxseg7ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_tu(
@@ -165,7 +165,7 @@ void test_vluxseg7ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_tu(
@@ -188,7 +188,7 @@ void test_vluxseg7ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_tu(
@@ -211,7 +211,7 @@ void test_vluxseg7ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_tu(
@@ -234,7 +234,7 @@ void test_vluxseg7ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_tu(
@@ -257,7 +257,7 @@ void test_vluxseg7ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_tu(
@@ -280,7 +280,7 @@ void test_vluxseg7ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_tu(
@@ -303,7 +303,7 @@ void test_vluxseg7ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_tu(
@@ -326,7 +326,7 @@ void test_vluxseg7ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_tu(
@@ -349,7 +349,7 @@ void test_vluxseg7ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_tu(
@@ -372,7 +372,7 @@ void test_vluxseg7ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_tu(
@@ -395,7 +395,7 @@ void test_vluxseg7ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_tu(
@@ -418,7 +418,7 @@ void test_vluxseg7ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_tu(
@@ -441,7 +441,7 @@ void test_vluxseg7ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_tu(
@@ -464,7 +464,7 @@ void test_vluxseg7ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_tu(
@@ -487,7 +487,7 @@ void test_vluxseg7ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_tu(
@@ -510,7 +510,7 @@ void test_vluxseg7ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_tu(
@@ -533,7 +533,7 @@ void test_vluxseg7ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_tu(
@@ -556,7 +556,7 @@ void test_vluxseg7ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_tu(
@@ -579,7 +579,7 @@ void test_vluxseg7ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_tu(
@@ -602,7 +602,7 @@ void test_vluxseg7ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_tum(
@@ -625,7 +625,7 @@ void test_vluxseg7ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_tum(
@@ -648,7 +648,7 @@ void test_vluxseg7ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_tum(
@@ -671,7 +671,7 @@ void test_vluxseg7ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_tum(
@@ -694,7 +694,7 @@ void test_vluxseg7ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_tum(
@@ -717,7 +717,7 @@ void test_vluxseg7ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_tum(
@@ -740,7 +740,7 @@ void test_vluxseg7ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_tum(
@@ -763,7 +763,7 @@ void test_vluxseg7ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_tum(
@@ -786,7 +786,7 @@ void test_vluxseg7ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_tum(
@@ -809,7 +809,7 @@ void test_vluxseg7ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_tum(
@@ -832,7 +832,7 @@ void test_vluxseg7ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_tum(
@@ -855,7 +855,7 @@ void test_vluxseg7ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_tum(
@@ -878,7 +878,7 @@ void test_vluxseg7ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_tum(
@@ -901,7 +901,7 @@ void test_vluxseg7ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_tum(
@@ -924,7 +924,7 @@ void test_vluxseg7ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_tum(
@@ -947,7 +947,7 @@ void test_vluxseg7ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_tum(
@@ -970,7 +970,7 @@ void test_vluxseg7ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_tum(
@@ -993,7 +993,7 @@ void test_vluxseg7ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_tum(
@@ -1016,7 +1016,7 @@ void test_vluxseg7ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_tum(
@@ -1039,7 +1039,7 @@ void test_vluxseg7ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_tum(
@@ -1062,7 +1062,7 @@ void test_vluxseg7ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_tum(
@@ -1085,7 +1085,7 @@ void test_vluxseg7ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_tum(
@@ -1108,7 +1108,7 @@ void test_vluxseg7ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_tum(
@@ -1131,7 +1131,7 @@ void test_vluxseg7ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_tum(
@@ -1154,7 +1154,7 @@ void test_vluxseg7ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_tum(
@@ -1177,7 +1177,7 @@ void test_vluxseg7ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_tum(
@@ -1200,7 +1200,7 @@ void test_vluxseg7ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_tumu(
@@ -1223,7 +1223,7 @@ void test_vluxseg7ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_tumu(
@@ -1246,7 +1246,7 @@ void test_vluxseg7ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_tumu(
@@ -1269,7 +1269,7 @@ void test_vluxseg7ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_tumu(
@@ -1292,7 +1292,7 @@ void test_vluxseg7ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_tumu(
@@ -1315,7 +1315,7 @@ void test_vluxseg7ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_tumu(
@@ -1338,7 +1338,7 @@ void test_vluxseg7ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_tumu(
@@ -1361,7 +1361,7 @@ void test_vluxseg7ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_tumu(
@@ -1384,7 +1384,7 @@ void test_vluxseg7ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_tumu(
@@ -1407,7 +1407,7 @@ void test_vluxseg7ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_tumu(
@@ -1430,7 +1430,7 @@ void test_vluxseg7ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_tumu(
@@ -1453,7 +1453,7 @@ void test_vluxseg7ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_tumu(
@@ -1476,7 +1476,7 @@ void test_vluxseg7ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_tumu(
@@ -1499,7 +1499,7 @@ void test_vluxseg7ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_tumu(
@@ -1522,7 +1522,7 @@ void test_vluxseg7ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_tumu(
@@ -1545,7 +1545,7 @@ void test_vluxseg7ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_tumu(
@@ -1568,7 +1568,7 @@ void test_vluxseg7ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_tumu(
@@ -1591,7 +1591,7 @@ void test_vluxseg7ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_tumu(
@@ -1614,7 +1614,7 @@ void test_vluxseg7ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_tumu(
@@ -1637,7 +1637,7 @@ void test_vluxseg7ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_tumu(
@@ -1660,7 +1660,7 @@ void test_vluxseg7ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_tumu(
@@ -1683,7 +1683,7 @@ void test_vluxseg7ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_tumu(
@@ -1706,7 +1706,7 @@ void test_vluxseg7ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_tumu(
@@ -1729,7 +1729,7 @@ void test_vluxseg7ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_tumu(
@@ -1752,7 +1752,7 @@ void test_vluxseg7ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_tumu(
@@ -1775,7 +1775,7 @@ void test_vluxseg7ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_tumu(
@@ -1798,7 +1798,7 @@ void test_vluxseg7ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_mu(
@@ -1821,7 +1821,7 @@ void test_vluxseg7ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_mu(
@@ -1844,7 +1844,7 @@ void test_vluxseg7ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_mu(
@@ -1867,7 +1867,7 @@ void test_vluxseg7ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_mu(
@@ -1890,7 +1890,7 @@ void test_vluxseg7ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_mu(
@@ -1913,7 +1913,7 @@ void test_vluxseg7ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_mu(
@@ -1936,7 +1936,7 @@ void test_vluxseg7ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_mu(
@@ -1959,7 +1959,7 @@ void test_vluxseg7ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_mu(
@@ -1982,7 +1982,7 @@ void test_vluxseg7ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_mu(
@@ -2005,7 +2005,7 @@ void test_vluxseg7ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_mu(
@@ -2028,7 +2028,7 @@ void test_vluxseg7ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_mu(
@@ -2051,7 +2051,7 @@ void test_vluxseg7ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_mu(
@@ -2074,7 +2074,7 @@ void test_vluxseg7ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_mu(
@@ -2097,7 +2097,7 @@ void test_vluxseg7ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_mu(
@@ -2120,7 +2120,7 @@ void test_vluxseg7ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_mu(
@@ -2143,7 +2143,7 @@ void test_vluxseg7ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_mu(
@@ -2166,7 +2166,7 @@ void test_vluxseg7ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_mu(
@@ -2189,7 +2189,7 @@ void test_vluxseg7ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_mu(
@@ -2212,7 +2212,7 @@ void test_vluxseg7ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_mu(
@@ -2235,7 +2235,7 @@ void test_vluxseg7ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_mu(
@@ -2258,7 +2258,7 @@ void test_vluxseg7ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_mu(
@@ -2281,7 +2281,7 @@ void test_vluxseg7ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vluxseg7ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_mu(
@@ -2327,7 +2327,7 @@ void test_vluxseg7ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_mu(
@@ -2350,7 +2350,7 @@ void test_vluxseg7ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_mu(
@@ -2373,7 +2373,7 @@ void test_vluxseg7ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_mu(
@@ -2396,6 +2396,6 @@ void test_vluxseg7ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg7ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+ return __riscv_vluxseg7ei8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c
index c7331409f43e..01970ddf5d50 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vluxseg8ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vluxseg8ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_tu(
@@ -104,7 +104,7 @@ void test_vluxseg8ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_tu(
@@ -129,7 +129,7 @@ void test_vluxseg8ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_tu(
@@ -154,7 +154,7 @@ void test_vluxseg8ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_tu(
@@ -179,7 +179,7 @@ void test_vluxseg8ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_tu(
@@ -204,7 +204,7 @@ void test_vluxseg8ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_tu(
@@ -229,7 +229,7 @@ void test_vluxseg8ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_tu(
@@ -254,7 +254,7 @@ void test_vluxseg8ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_tu(
@@ -279,7 +279,7 @@ void test_vluxseg8ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_tu(
@@ -304,7 +304,7 @@ void test_vluxseg8ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_tu(
@@ -329,7 +329,7 @@ void test_vluxseg8ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_tu(
@@ -354,7 +354,7 @@ void test_vluxseg8ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_tu(
@@ -379,7 +379,7 @@ void test_vluxseg8ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_tu(
@@ -404,7 +404,7 @@ void test_vluxseg8ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_tu(
@@ -429,7 +429,7 @@ void test_vluxseg8ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_tu(
@@ -454,7 +454,7 @@ void test_vluxseg8ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_tu(
@@ -479,7 +479,7 @@ void test_vluxseg8ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_tu(
@@ -504,7 +504,7 @@ void test_vluxseg8ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_tu(
@@ -529,7 +529,7 @@ void test_vluxseg8ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_tu(
@@ -554,7 +554,7 @@ void test_vluxseg8ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_tu(
@@ -579,7 +579,7 @@ void test_vluxseg8ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_tu(
@@ -604,7 +604,7 @@ void test_vluxseg8ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_tu(
@@ -629,7 +629,7 @@ void test_vluxseg8ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_tu(
@@ -654,7 +654,7 @@ void test_vluxseg8ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_tum(
@@ -679,7 +679,7 @@ void test_vluxseg8ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_tum(
@@ -704,7 +704,7 @@ void test_vluxseg8ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_tum(
@@ -729,7 +729,7 @@ void test_vluxseg8ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_tum(
@@ -754,7 +754,7 @@ void test_vluxseg8ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_tum(
@@ -779,7 +779,7 @@ void test_vluxseg8ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_tum(
@@ -804,7 +804,7 @@ void test_vluxseg8ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_tum(
@@ -829,7 +829,7 @@ void test_vluxseg8ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_tum(
@@ -854,7 +854,7 @@ void test_vluxseg8ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_tum(
@@ -879,7 +879,7 @@ void test_vluxseg8ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_tum(
@@ -904,7 +904,7 @@ void test_vluxseg8ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_tum(
@@ -929,7 +929,7 @@ void test_vluxseg8ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_tum(
@@ -954,7 +954,7 @@ void test_vluxseg8ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_tum(
@@ -979,7 +979,7 @@ void test_vluxseg8ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_tum(
@@ -1004,7 +1004,7 @@ void test_vluxseg8ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_tum(
@@ -1029,7 +1029,7 @@ void test_vluxseg8ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg8ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_tum(
@@ -1079,7 +1079,7 @@ void test_vluxseg8ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_tum(
@@ -1104,7 +1104,7 @@ void test_vluxseg8ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_tum(
@@ -1129,7 +1129,7 @@ void test_vluxseg8ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_tum(
@@ -1154,7 +1154,7 @@ void test_vluxseg8ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_tum(
@@ -1179,7 +1179,7 @@ void test_vluxseg8ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_tum(
@@ -1204,7 +1204,7 @@ void test_vluxseg8ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_tum(
@@ -1229,7 +1229,7 @@ void test_vluxseg8ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_tum(
@@ -1254,7 +1254,7 @@ void test_vluxseg8ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_tum(
@@ -1279,7 +1279,7 @@ void test_vluxseg8ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_tum(
@@ -1304,7 +1304,7 @@ void test_vluxseg8ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_tumu(
@@ -1329,7 +1329,7 @@ void test_vluxseg8ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vluxseg8ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_tumu(
@@ -1379,7 +1379,7 @@ void test_vluxseg8ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_tumu(
@@ -1404,7 +1404,7 @@ void test_vluxseg8ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg8ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_tumu(
@@ -1454,7 +1454,7 @@ void test_vluxseg8ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_tumu(
@@ -1479,7 +1479,7 @@ void test_vluxseg8ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_tumu(
@@ -1504,7 +1504,7 @@ void test_vluxseg8ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_tumu(
@@ -1529,7 +1529,7 @@ void test_vluxseg8ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_tumu(
@@ -1554,7 +1554,7 @@ void test_vluxseg8ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg8ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_tumu(
@@ -1604,7 +1604,7 @@ void test_vluxseg8ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vluxseg8ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_tumu(
@@ -1654,7 +1654,7 @@ void test_vluxseg8ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_tumu(
@@ -1679,7 +1679,7 @@ void test_vluxseg8ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_tumu(
@@ -1704,7 +1704,7 @@ void test_vluxseg8ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_tumu(
@@ -1729,7 +1729,7 @@ void test_vluxseg8ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_tumu(
@@ -1754,7 +1754,7 @@ void test_vluxseg8ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_tumu(
@@ -1779,7 +1779,7 @@ void test_vluxseg8ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_tumu(
@@ -1804,7 +1804,7 @@ void test_vluxseg8ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_tumu(
@@ -1829,7 +1829,7 @@ void test_vluxseg8ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_tumu(
@@ -1854,7 +1854,7 @@ void test_vluxseg8ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_tumu(
@@ -1879,7 +1879,7 @@ void test_vluxseg8ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_tumu(
@@ -1904,7 +1904,7 @@ void test_vluxseg8ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_tumu(
@@ -1929,7 +1929,7 @@ void test_vluxseg8ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_tumu(
@@ -1954,7 +1954,7 @@ void test_vluxseg8ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_mu(
@@ -1979,7 +1979,7 @@ void test_vluxseg8ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_mu(
@@ -2004,7 +2004,7 @@ void test_vluxseg8ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_mu(
@@ -2029,7 +2029,7 @@ void test_vluxseg8ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_mu(
@@ -2054,7 +2054,7 @@ void test_vluxseg8ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_mu(
@@ -2079,7 +2079,7 @@ void test_vluxseg8ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg8ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_mu(
@@ -2129,7 +2129,7 @@ void test_vluxseg8ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_mu(
@@ -2154,7 +2154,7 @@ void test_vluxseg8ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_mu(
@@ -2179,7 +2179,7 @@ void test_vluxseg8ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_mu(
@@ -2204,7 +2204,7 @@ void test_vluxseg8ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_mu(
@@ -2229,7 +2229,7 @@ void test_vluxseg8ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_mu(
@@ -2254,7 +2254,7 @@ void test_vluxseg8ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vluxseg8ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vluxseg8ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_mu(
@@ -2329,7 +2329,7 @@ void test_vluxseg8ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_mu(
@@ -2354,7 +2354,7 @@ void test_vluxseg8ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_mu(
@@ -2379,7 +2379,7 @@ void test_vluxseg8ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_mu(
@@ -2404,7 +2404,7 @@ void test_vluxseg8ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_mu(
@@ -2429,7 +2429,7 @@ void test_vluxseg8ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_mu(
@@ -2454,7 +2454,7 @@ void test_vluxseg8ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_mu(
@@ -2479,7 +2479,7 @@ void test_vluxseg8ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_mu(
@@ -2504,7 +2504,7 @@ void test_vluxseg8ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_mu(
@@ -2529,7 +2529,7 @@ void test_vluxseg8ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_mu(
@@ -2554,7 +2554,7 @@ void test_vluxseg8ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_mu(
@@ -2579,7 +2579,7 @@ void test_vluxseg8ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_mu(
@@ -2604,6 +2604,6 @@ void test_vluxseg8ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
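(Editorial sketch, not part of the patch: every hunk in these autogenerated tests applies the same mechanical rename, where a bare overloaded call such as vluxseg8ei16_mu(...) gains the __riscv_ prefix while its argument list stays unchanged. The following minimal C sketch shows what a user call site looks like after the rename; it mirrors the intrinsic and signature exercised by test_vluxseg8ei16_v_u64m1_mu above, and the wrapper name gather8_u64m1_mu is hypothetical, introduced only for illustration.)

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical wrapper (not from the patch). It performs a masked, indexed
 * 8-field segment load of uint64 elements under the _mu (mask-undisturbed)
 * policy, calling the same overloaded intrinsic the test above exercises,
 * spelled with the new __riscv_ prefix. Build with an RVV-enabled toolchain,
 * e.g. clang -march=rv64gcv. */
void gather8_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
                      vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5,
                      vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask,
                      vuint64m1_t maskedoff0, vuint64m1_t maskedoff1,
                      vuint64m1_t maskedoff2, vuint64m1_t maskedoff3,
                      vuint64m1_t maskedoff4, vuint64m1_t maskedoff5,
                      vuint64m1_t maskedoff6, vuint64m1_t maskedoff7,
                      const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
  /* Before this patch the overloaded call was spelled vluxseg8ei16_mu(...);
   * after it, the same call is: */
  __riscv_vluxseg8ei16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask,
                          maskedoff0, maskedoff1, maskedoff2, maskedoff3,
                          maskedoff4, maskedoff5, maskedoff6, maskedoff7,
                          base, bindex, vl);
}
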
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c
index af33dbd3b85c..41ae3819f0e2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vluxseg8ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vluxseg8ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_tu(
@@ -104,7 +104,7 @@ void test_vluxseg8ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_tu(
@@ -129,7 +129,7 @@ void test_vluxseg8ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_tu(
@@ -154,7 +154,7 @@ void test_vluxseg8ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_tu(
@@ -179,7 +179,7 @@ void test_vluxseg8ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_tu(
@@ -204,7 +204,7 @@ void test_vluxseg8ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_tu(
@@ -229,7 +229,7 @@ void test_vluxseg8ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_tu(
@@ -254,7 +254,7 @@ void test_vluxseg8ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_tu(
@@ -279,7 +279,7 @@ void test_vluxseg8ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_tu(
@@ -304,7 +304,7 @@ void test_vluxseg8ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_tu(
@@ -329,7 +329,7 @@ void test_vluxseg8ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_tu(
@@ -354,7 +354,7 @@ void test_vluxseg8ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_tu(
@@ -379,7 +379,7 @@ void test_vluxseg8ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_tu(
@@ -404,7 +404,7 @@ void test_vluxseg8ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_tu(
@@ -429,7 +429,7 @@ void test_vluxseg8ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_tu(
@@ -454,7 +454,7 @@ void test_vluxseg8ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_tu(
@@ -479,7 +479,7 @@ void test_vluxseg8ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_tu(
@@ -504,7 +504,7 @@ void test_vluxseg8ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_tu(
@@ -529,7 +529,7 @@ void test_vluxseg8ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_tu(
@@ -554,7 +554,7 @@ void test_vluxseg8ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_tu(
@@ -579,7 +579,7 @@ void test_vluxseg8ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_tu(
@@ -604,7 +604,7 @@ void test_vluxseg8ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_tu(
@@ -629,7 +629,7 @@ void test_vluxseg8ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_tu(
@@ -654,7 +654,7 @@ void test_vluxseg8ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_tum(
@@ -679,7 +679,7 @@ void test_vluxseg8ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_tum(
@@ -704,7 +704,7 @@ void test_vluxseg8ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_tum(
@@ -729,7 +729,7 @@ void test_vluxseg8ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_tum(
@@ -754,7 +754,7 @@ void test_vluxseg8ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_tum(
@@ -779,7 +779,7 @@ void test_vluxseg8ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_tum(
@@ -804,7 +804,7 @@ void test_vluxseg8ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_tum(
@@ -829,7 +829,7 @@ void test_vluxseg8ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_tum(
@@ -854,7 +854,7 @@ void test_vluxseg8ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_tum(
@@ -879,7 +879,7 @@ void test_vluxseg8ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_tum(
@@ -904,7 +904,7 @@ void test_vluxseg8ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_tum(
@@ -929,7 +929,7 @@ void test_vluxseg8ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_tum(
@@ -954,7 +954,7 @@ void test_vluxseg8ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_tum(
@@ -979,7 +979,7 @@ void test_vluxseg8ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_tum(
@@ -1004,7 +1004,7 @@ void test_vluxseg8ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_tum(
@@ -1029,7 +1029,7 @@ void test_vluxseg8ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg8ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_tum(
@@ -1079,7 +1079,7 @@ void test_vluxseg8ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_tum(
@@ -1104,7 +1104,7 @@ void test_vluxseg8ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_tum(
@@ -1129,7 +1129,7 @@ void test_vluxseg8ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_tum(
@@ -1154,7 +1154,7 @@ void test_vluxseg8ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_tum(
@@ -1179,7 +1179,7 @@ void test_vluxseg8ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_tum(
@@ -1204,7 +1204,7 @@ void test_vluxseg8ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_tum(
@@ -1229,7 +1229,7 @@ void test_vluxseg8ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_tum(
@@ -1254,7 +1254,7 @@ void test_vluxseg8ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_tum(
@@ -1279,7 +1279,7 @@ void test_vluxseg8ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_tum(
@@ -1304,7 +1304,7 @@ void test_vluxseg8ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_tumu(
@@ -1329,7 +1329,7 @@ void test_vluxseg8ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vluxseg8ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_tumu(
@@ -1379,7 +1379,7 @@ void test_vluxseg8ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_tumu(
@@ -1404,7 +1404,7 @@ void test_vluxseg8ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg8ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_tumu(
@@ -1454,7 +1454,7 @@ void test_vluxseg8ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_tumu(
@@ -1479,7 +1479,7 @@ void test_vluxseg8ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_tumu(
@@ -1504,7 +1504,7 @@ void test_vluxseg8ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_tumu(
@@ -1529,7 +1529,7 @@ void test_vluxseg8ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_tumu(
@@ -1554,7 +1554,7 @@ void test_vluxseg8ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg8ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_tumu(
@@ -1604,7 +1604,7 @@ void test_vluxseg8ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vluxseg8ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_tumu(
@@ -1654,7 +1654,7 @@ void test_vluxseg8ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_tumu(
@@ -1679,7 +1679,7 @@ void test_vluxseg8ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_tumu(
@@ -1704,7 +1704,7 @@ void test_vluxseg8ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_tumu(
@@ -1729,7 +1729,7 @@ void test_vluxseg8ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_tumu(
@@ -1754,7 +1754,7 @@ void test_vluxseg8ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_tumu(
@@ -1779,7 +1779,7 @@ void test_vluxseg8ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_tumu(
@@ -1804,7 +1804,7 @@ void test_vluxseg8ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_tumu(
@@ -1829,7 +1829,7 @@ void test_vluxseg8ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_tumu(
@@ -1854,7 +1854,7 @@ void test_vluxseg8ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_tumu(
@@ -1879,7 +1879,7 @@ void test_vluxseg8ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_tumu(
@@ -1904,7 +1904,7 @@ void test_vluxseg8ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_tumu(
@@ -1929,7 +1929,7 @@ void test_vluxseg8ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_tumu(
@@ -1954,7 +1954,7 @@ void test_vluxseg8ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_mu(
@@ -1979,7 +1979,7 @@ void test_vluxseg8ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_mu(
@@ -2004,7 +2004,7 @@ void test_vluxseg8ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_mu(
@@ -2029,7 +2029,7 @@ void test_vluxseg8ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_mu(
@@ -2054,7 +2054,7 @@ void test_vluxseg8ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_mu(
@@ -2079,7 +2079,7 @@ void test_vluxseg8ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg8ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_mu(
@@ -2129,7 +2129,7 @@ void test_vluxseg8ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_mu(
@@ -2154,7 +2154,7 @@ void test_vluxseg8ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_mu(
@@ -2179,7 +2179,7 @@ void test_vluxseg8ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_mu(
@@ -2204,7 +2204,7 @@ void test_vluxseg8ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_mu(
@@ -2229,7 +2229,7 @@ void test_vluxseg8ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_mu(
@@ -2254,7 +2254,7 @@ void test_vluxseg8ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vluxseg8ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vluxseg8ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_mu(
@@ -2329,7 +2329,7 @@ void test_vluxseg8ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_mu(
@@ -2354,7 +2354,7 @@ void test_vluxseg8ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_mu(
@@ -2379,7 +2379,7 @@ void test_vluxseg8ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_mu(
@@ -2404,7 +2404,7 @@ void test_vluxseg8ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_mu(
@@ -2429,7 +2429,7 @@ void test_vluxseg8ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_mu(
@@ -2454,7 +2454,7 @@ void test_vluxseg8ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_mu(
@@ -2479,7 +2479,7 @@ void test_vluxseg8ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_mu(
@@ -2504,7 +2504,7 @@ void test_vluxseg8ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_mu(
@@ -2529,7 +2529,7 @@ void test_vluxseg8ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_mu(
@@ -2554,7 +2554,7 @@ void test_vluxseg8ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_mu(
@@ -2579,7 +2579,7 @@ void test_vluxseg8ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_mu(
@@ -2604,6 +2604,6 @@ void test_vluxseg8ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c
index a0b0e70af770..c5d8f1e78e2b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vluxseg8ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vluxseg8ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_tu(
@@ -104,7 +104,7 @@ void test_vluxseg8ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_tu(
@@ -129,7 +129,7 @@ void test_vluxseg8ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_tu(
@@ -154,7 +154,7 @@ void test_vluxseg8ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_tu(
@@ -179,7 +179,7 @@ void test_vluxseg8ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_tu(
@@ -204,7 +204,7 @@ void test_vluxseg8ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_tu(
@@ -229,7 +229,7 @@ void test_vluxseg8ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_tu(
@@ -254,7 +254,7 @@ void test_vluxseg8ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_tu(
@@ -279,7 +279,7 @@ void test_vluxseg8ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_tu(
@@ -304,7 +304,7 @@ void test_vluxseg8ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_tu(
@@ -329,7 +329,7 @@ void test_vluxseg8ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_tu(
@@ -354,7 +354,7 @@ void test_vluxseg8ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_tu(
@@ -379,7 +379,7 @@ void test_vluxseg8ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_tu(
@@ -404,7 +404,7 @@ void test_vluxseg8ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_tu(
@@ -429,7 +429,7 @@ void test_vluxseg8ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_tu(
@@ -454,7 +454,7 @@ void test_vluxseg8ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_tu(
@@ -479,7 +479,7 @@ void test_vluxseg8ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_tu(
@@ -504,7 +504,7 @@ void test_vluxseg8ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_tu(
@@ -529,7 +529,7 @@ void test_vluxseg8ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_tu(
@@ -554,7 +554,7 @@ void test_vluxseg8ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_tu(
@@ -579,7 +579,7 @@ void test_vluxseg8ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_tu(
@@ -604,7 +604,7 @@ void test_vluxseg8ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_tu(
@@ -629,7 +629,7 @@ void test_vluxseg8ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_tu(
@@ -654,7 +654,7 @@ void test_vluxseg8ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_tum(
@@ -679,7 +679,7 @@ void test_vluxseg8ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_tum(
@@ -704,7 +704,7 @@ void test_vluxseg8ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_tum(
@@ -729,7 +729,7 @@ void test_vluxseg8ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_tum(
@@ -754,7 +754,7 @@ void test_vluxseg8ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_tum(
@@ -779,7 +779,7 @@ void test_vluxseg8ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_tum(
@@ -804,7 +804,7 @@ void test_vluxseg8ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_tum(
@@ -829,7 +829,7 @@ void test_vluxseg8ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_tum(
@@ -854,7 +854,7 @@ void test_vluxseg8ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_tum(
@@ -879,7 +879,7 @@ void test_vluxseg8ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_tum(
@@ -904,7 +904,7 @@ void test_vluxseg8ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_tum(
@@ -929,7 +929,7 @@ void test_vluxseg8ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_tum(
@@ -954,7 +954,7 @@ void test_vluxseg8ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_tum(
@@ -979,7 +979,7 @@ void test_vluxseg8ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_tum(
@@ -1004,7 +1004,7 @@ void test_vluxseg8ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_tum(
@@ -1029,7 +1029,7 @@ void test_vluxseg8ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg8ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_tum(
@@ -1079,7 +1079,7 @@ void test_vluxseg8ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_tum(
@@ -1104,7 +1104,7 @@ void test_vluxseg8ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_tum(
@@ -1129,7 +1129,7 @@ void test_vluxseg8ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_tum(
@@ -1154,7 +1154,7 @@ void test_vluxseg8ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_tum(
@@ -1179,7 +1179,7 @@ void test_vluxseg8ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_tum(
@@ -1204,7 +1204,7 @@ void test_vluxseg8ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_tum(
@@ -1229,7 +1229,7 @@ void test_vluxseg8ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_tum(
@@ -1254,7 +1254,7 @@ void test_vluxseg8ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_tum(
@@ -1279,7 +1279,7 @@ void test_vluxseg8ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_tum(
@@ -1304,7 +1304,7 @@ void test_vluxseg8ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_tumu(
@@ -1329,7 +1329,7 @@ void test_vluxseg8ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vluxseg8ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_tumu(
@@ -1379,7 +1379,7 @@ void test_vluxseg8ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_tumu(
@@ -1404,7 +1404,7 @@ void test_vluxseg8ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg8ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_tumu(
@@ -1454,7 +1454,7 @@ void test_vluxseg8ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_tumu(
@@ -1479,7 +1479,7 @@ void test_vluxseg8ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_tumu(
@@ -1504,7 +1504,7 @@ void test_vluxseg8ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_tumu(
@@ -1529,7 +1529,7 @@ void test_vluxseg8ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_tumu(
@@ -1554,7 +1554,7 @@ void test_vluxseg8ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg8ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_tumu(
@@ -1604,7 +1604,7 @@ void test_vluxseg8ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vluxseg8ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_tumu(
@@ -1654,7 +1654,7 @@ void test_vluxseg8ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_tumu(
@@ -1679,7 +1679,7 @@ void test_vluxseg8ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_tumu(
@@ -1704,7 +1704,7 @@ void test_vluxseg8ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_tumu(
@@ -1729,7 +1729,7 @@ void test_vluxseg8ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_tumu(
@@ -1754,7 +1754,7 @@ void test_vluxseg8ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_tumu(
@@ -1779,7 +1779,7 @@ void test_vluxseg8ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_tumu(
@@ -1804,7 +1804,7 @@ void test_vluxseg8ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_tumu(
@@ -1829,7 +1829,7 @@ void test_vluxseg8ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_tumu(
@@ -1854,7 +1854,7 @@ void test_vluxseg8ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_tumu(
@@ -1879,7 +1879,7 @@ void test_vluxseg8ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_tumu(
@@ -1904,7 +1904,7 @@ void test_vluxseg8ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_tumu(
@@ -1929,7 +1929,7 @@ void test_vluxseg8ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_tumu(
@@ -1954,7 +1954,7 @@ void test_vluxseg8ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_mu(
@@ -1979,7 +1979,7 @@ void test_vluxseg8ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_mu(
@@ -2004,7 +2004,7 @@ void test_vluxseg8ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_mu(
@@ -2029,7 +2029,7 @@ void test_vluxseg8ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_mu(
@@ -2054,7 +2054,7 @@ void test_vluxseg8ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_mu(
@@ -2079,7 +2079,7 @@ void test_vluxseg8ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg8ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_mu(
@@ -2129,7 +2129,7 @@ void test_vluxseg8ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_mu(
@@ -2154,7 +2154,7 @@ void test_vluxseg8ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_mu(
@@ -2179,7 +2179,7 @@ void test_vluxseg8ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_mu(
@@ -2204,7 +2204,7 @@ void test_vluxseg8ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_mu(
@@ -2229,7 +2229,7 @@ void test_vluxseg8ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_mu(
@@ -2254,7 +2254,7 @@ void test_vluxseg8ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vluxseg8ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vluxseg8ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_mu(
@@ -2329,7 +2329,7 @@ void test_vluxseg8ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_mu(
@@ -2354,7 +2354,7 @@ void test_vluxseg8ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_mu(
@@ -2379,7 +2379,7 @@ void test_vluxseg8ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_mu(
@@ -2404,7 +2404,7 @@ void test_vluxseg8ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_mu(
@@ -2429,7 +2429,7 @@ void test_vluxseg8ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_mu(
@@ -2454,7 +2454,7 @@ void test_vluxseg8ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_mu(
@@ -2479,7 +2479,7 @@ void test_vluxseg8ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_mu(
@@ -2504,7 +2504,7 @@ void test_vluxseg8ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_mu(
@@ -2529,7 +2529,7 @@ void test_vluxseg8ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_mu(
@@ -2554,7 +2554,7 @@ void test_vluxseg8ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_mu(
@@ -2579,7 +2579,7 @@ void test_vluxseg8ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_mu(
@@ -2604,6 +2604,6 @@ void test_vluxseg8ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c
index 55b8de21e3c5..272c0259edd4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c
@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_tu(
@@ -54,7 +54,7 @@ void test_vluxseg8ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_tu(
@@ -79,7 +79,7 @@ void test_vluxseg8ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_tu(
@@ -104,7 +104,7 @@ void test_vluxseg8ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_tu(
@@ -129,7 +129,7 @@ void test_vluxseg8ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_tu(
@@ -154,7 +154,7 @@ void test_vluxseg8ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_tu(
@@ -179,7 +179,7 @@ void test_vluxseg8ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_tu(
@@ -204,7 +204,7 @@ void test_vluxseg8ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_tu(
@@ -229,7 +229,7 @@ void test_vluxseg8ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_tu(
@@ -254,7 +254,7 @@ void test_vluxseg8ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_tu(
@@ -279,7 +279,7 @@ void test_vluxseg8ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_tu(
@@ -304,7 +304,7 @@ void test_vluxseg8ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_tu(
@@ -329,7 +329,7 @@ void test_vluxseg8ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_tu(
@@ -354,7 +354,7 @@ void test_vluxseg8ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_tu(
@@ -379,7 +379,7 @@ void test_vluxseg8ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_tu(
@@ -404,7 +404,7 @@ void test_vluxseg8ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_tu(
@@ -429,7 +429,7 @@ void test_vluxseg8ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_tu(
@@ -454,7 +454,7 @@ void test_vluxseg8ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_tu(
@@ -479,7 +479,7 @@ void test_vluxseg8ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_tu(
@@ -504,7 +504,7 @@ void test_vluxseg8ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_tu(
@@ -529,7 +529,7 @@ void test_vluxseg8ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_tu(
@@ -554,7 +554,7 @@ void test_vluxseg8ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_tu(
@@ -579,7 +579,7 @@ void test_vluxseg8ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_tu(
@@ -604,7 +604,7 @@ void test_vluxseg8ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_tu(
@@ -629,7 +629,7 @@ void test_vluxseg8ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_tu(
@@ -654,7 +654,7 @@ void test_vluxseg8ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_tum(
@@ -679,7 +679,7 @@ void test_vluxseg8ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_tum(
@@ -704,7 +704,7 @@ void test_vluxseg8ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_tum(
@@ -729,7 +729,7 @@ void test_vluxseg8ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_tum(
@@ -754,7 +754,7 @@ void test_vluxseg8ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_tum(
@@ -779,7 +779,7 @@ void test_vluxseg8ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_tum(
@@ -804,7 +804,7 @@ void test_vluxseg8ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_tum(
@@ -829,7 +829,7 @@ void test_vluxseg8ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_tum(
@@ -854,7 +854,7 @@ void test_vluxseg8ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_tum(
@@ -879,7 +879,7 @@ void test_vluxseg8ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_tum(
@@ -904,7 +904,7 @@ void test_vluxseg8ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_tum(
@@ -929,7 +929,7 @@ void test_vluxseg8ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_tum(
@@ -954,7 +954,7 @@ void test_vluxseg8ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_tum(
@@ -979,7 +979,7 @@ void test_vluxseg8ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_tum(
@@ -1004,7 +1004,7 @@ void test_vluxseg8ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_tum(
@@ -1029,7 +1029,7 @@ void test_vluxseg8ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_tum(
@@ -1054,7 +1054,7 @@ void test_vluxseg8ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_tum(
@@ -1079,7 +1079,7 @@ void test_vluxseg8ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_tum(
@@ -1104,7 +1104,7 @@ void test_vluxseg8ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_tum(
@@ -1129,7 +1129,7 @@ void test_vluxseg8ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_tum(
@@ -1154,7 +1154,7 @@ void test_vluxseg8ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_tum(
@@ -1179,7 +1179,7 @@ void test_vluxseg8ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_tum(
@@ -1204,7 +1204,7 @@ void test_vluxseg8ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_tum(
@@ -1229,7 +1229,7 @@ void test_vluxseg8ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_tum(
@@ -1254,7 +1254,7 @@ void test_vluxseg8ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_tum(
@@ -1279,7 +1279,7 @@ void test_vluxseg8ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_tum(
@@ -1304,7 +1304,7 @@ void test_vluxseg8ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_tumu(
@@ -1329,7 +1329,7 @@ void test_vluxseg8ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_tumu(
@@ -1354,7 +1354,7 @@ void test_vluxseg8ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_tumu(
@@ -1379,7 +1379,7 @@ void test_vluxseg8ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_tumu(
@@ -1404,7 +1404,7 @@ void test_vluxseg8ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_tumu(
@@ -1429,7 +1429,7 @@ void test_vluxseg8ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_tumu(
@@ -1454,7 +1454,7 @@ void test_vluxseg8ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_tumu(
@@ -1479,7 +1479,7 @@ void test_vluxseg8ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_tumu(
@@ -1504,7 +1504,7 @@ void test_vluxseg8ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_tumu(
@@ -1529,7 +1529,7 @@ void test_vluxseg8ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_tumu(
@@ -1554,7 +1554,7 @@ void test_vluxseg8ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_tumu(
@@ -1579,7 +1579,7 @@ void test_vluxseg8ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_tumu(
@@ -1604,7 +1604,7 @@ void test_vluxseg8ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_tumu(
@@ -1629,7 +1629,7 @@ void test_vluxseg8ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_tumu(
@@ -1654,7 +1654,7 @@ void test_vluxseg8ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_tumu(
@@ -1679,7 +1679,7 @@ void test_vluxseg8ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_tumu(
@@ -1704,7 +1704,7 @@ void test_vluxseg8ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_tumu(
@@ -1729,7 +1729,7 @@ void test_vluxseg8ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_tumu(
@@ -1754,7 +1754,7 @@ void test_vluxseg8ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_tumu(
@@ -1779,7 +1779,7 @@ void test_vluxseg8ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_tumu(
@@ -1804,7 +1804,7 @@ void test_vluxseg8ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_tumu(
@@ -1829,7 +1829,7 @@ void test_vluxseg8ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_tumu(
@@ -1854,7 +1854,7 @@ void test_vluxseg8ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_tumu(
@@ -1879,7 +1879,7 @@ void test_vluxseg8ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_tumu(
@@ -1904,7 +1904,7 @@ void test_vluxseg8ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_tumu(
@@ -1929,7 +1929,7 @@ void test_vluxseg8ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_tumu(
@@ -1954,7 +1954,7 @@ void test_vluxseg8ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_mu(
@@ -1979,7 +1979,7 @@ void test_vluxseg8ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_mu(
@@ -2004,7 +2004,7 @@ void test_vluxseg8ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_mu(
@@ -2029,7 +2029,7 @@ void test_vluxseg8ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_mu(
@@ -2054,7 +2054,7 @@ void test_vluxseg8ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_mu(
@@ -2079,7 +2079,7 @@ void test_vluxseg8ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_mu(
@@ -2104,7 +2104,7 @@ void test_vluxseg8ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_mu(
@@ -2129,7 +2129,7 @@ void test_vluxseg8ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_mu(
@@ -2154,7 +2154,7 @@ void test_vluxseg8ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_mu(
@@ -2179,7 +2179,7 @@ void test_vluxseg8ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_mu(
@@ -2204,7 +2204,7 @@ void test_vluxseg8ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_mu(
@@ -2229,7 +2229,7 @@ void test_vluxseg8ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_mu(
@@ -2254,7 +2254,7 @@ void test_vluxseg8ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_mu(
@@ -2279,7 +2279,7 @@ void test_vluxseg8ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_mu(
@@ -2304,7 +2304,7 @@ void test_vluxseg8ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_mu(
@@ -2329,7 +2329,7 @@ void test_vluxseg8ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_mu(
@@ -2354,7 +2354,7 @@ void test_vluxseg8ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_mu(
@@ -2379,7 +2379,7 @@ void test_vluxseg8ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_mu(
@@ -2404,7 +2404,7 @@ void test_vluxseg8ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_mu(
@@ -2429,7 +2429,7 @@ void test_vluxseg8ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_mu(
@@ -2454,7 +2454,7 @@ void test_vluxseg8ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_mu(
@@ -2479,7 +2479,7 @@ void test_vluxseg8ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_mu(
@@ -2504,7 +2504,7 @@ void test_vluxseg8ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_mu(
@@ -2529,7 +2529,7 @@ void test_vluxseg8ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_mu(
@@ -2554,7 +2554,7 @@ void test_vluxseg8ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_mu(
@@ -2579,7 +2579,7 @@ void test_vluxseg8ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_mu(
@@ -2604,6 +2604,6 @@ void test_vluxseg8ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *
// CHECK-RV64-NEXT: ret void
//
void test_vluxseg8ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+ return __riscv_vluxseg8ei8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
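Every hunk in this patch follows the same mechanical pattern: each overloaded RVV intrinsic call in the autogenerated tests gains the `__riscv_` prefix, while the test function names, CHECK lines, and argument lists are left untouched. The policy suffixes seen in the test names are unchanged too: `_tu` (tail undisturbed), `_mu` (mask undisturbed), and `_tumu` (tail and mask undisturbed). As a minimal sketch of what the rename means at a call site — the function name and operands here are placeholders, and the intrinsic signature is taken directly from the vmacc hunks below — the overloaded tail-undisturbed multiply-add is now reached as:

    #include <riscv_vector.h>

    // Hypothetical caller, compiled for a V-extension target.
    vint8mf8_t fused_multiply_add(vint8mf8_t vd, vint8mf8_t vs1,
                                  vint8mf8_t vs2, size_t vl) {
      // Before this patch the overloaded form was spelled vmacc_tu(...);
      // after it, the same overload carries the __riscv_ prefix:
      return __riscv_vmacc_tu(vd, vs1, vs2, vl);
    }

The non-prefixed spellings are removed rather than aliased, so user code written against the old overloaded names needs the same one-line rename the tests receive here.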
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmacc.c
index caa1b0fa7c92..7e03869166de 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_tu(
@@ -22,7 +22,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_tu(
@@ -31,7 +31,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_tu(
@@ -40,7 +40,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_tu(
@@ -49,7 +49,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_tu(
@@ -58,7 +58,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_tu(
@@ -67,7 +67,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_tu(
@@ -76,7 +76,7 @@ vint8m1_t test_vmacc_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_tu(
@@ -85,7 +85,7 @@ vint8m1_t test_vmacc_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_tu(
@@ -94,7 +94,7 @@ vint8m2_t test_vmacc_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_tu(
@@ -103,7 +103,7 @@ vint8m2_t test_vmacc_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_tu(
@@ -112,7 +112,7 @@ vint8m4_t test_vmacc_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_tu(
@@ -121,7 +121,7 @@ vint8m4_t test_vmacc_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_tu(
@@ -130,7 +130,7 @@ vint8m8_t test_vmacc_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_tu(
@@ -139,7 +139,7 @@ vint8m8_t test_vmacc_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_tu(
@@ -148,7 +148,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_tu(
@@ -157,7 +157,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_tu(
@@ -166,7 +166,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_tu(
@@ -175,7 +175,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_tu(
@@ -184,7 +184,7 @@ vint16m1_t test_vmacc_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_tu(
@@ -193,7 +193,7 @@ vint16m1_t test_vmacc_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_tu(
@@ -202,7 +202,7 @@ vint16m2_t test_vmacc_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_tu(
@@ -211,7 +211,7 @@ vint16m2_t test_vmacc_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_tu(
@@ -220,7 +220,7 @@ vint16m4_t test_vmacc_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_tu(
@@ -229,7 +229,7 @@ vint16m4_t test_vmacc_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_tu(
@@ -238,7 +238,7 @@ vint16m8_t test_vmacc_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tu(
@@ -247,7 +247,7 @@ vint16m8_t test_vmacc_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tu(
@@ -256,7 +256,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_tu(
@@ -265,7 +265,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_tu(
@@ -274,7 +274,7 @@ vint32m1_t test_vmacc_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vmacc_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vmacc_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_tu(
@@ -301,7 +301,7 @@ vint32m2_t test_vmacc_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_tu(
@@ -310,7 +310,7 @@ vint32m4_t test_vmacc_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_tu(
@@ -319,7 +319,7 @@ vint32m4_t test_vmacc_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_tu(
@@ -328,7 +328,7 @@ vint32m8_t test_vmacc_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_tu(
@@ -337,7 +337,7 @@ vint32m8_t test_vmacc_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_tu(
@@ -346,7 +346,7 @@ vint64m1_t test_vmacc_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_tu(
@@ -355,7 +355,7 @@ vint64m1_t test_vmacc_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_tu(
@@ -364,7 +364,7 @@ vint64m2_t test_vmacc_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_tu(
@@ -373,7 +373,7 @@ vint64m2_t test_vmacc_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_tu(
@@ -382,7 +382,7 @@ vint64m4_t test_vmacc_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_tu(
@@ -391,7 +391,7 @@ vint64m4_t test_vmacc_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_tu(
@@ -400,7 +400,7 @@ vint64m8_t test_vmacc_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_tu(
@@ -409,7 +409,7 @@ vint64m8_t test_vmacc_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_tu(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_tu(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_tu(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_tu(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_tu(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_tu(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_tu(
@@ -472,7 +472,7 @@ vuint8m1_t test_vmacc_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_tu(
@@ -481,7 +481,7 @@ vuint8m1_t test_vmacc_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_tu(
@@ -490,7 +490,7 @@ vuint8m2_t test_vmacc_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_tu(
@@ -499,7 +499,7 @@ vuint8m2_t test_vmacc_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_tu(
@@ -508,7 +508,7 @@ vuint8m4_t test_vmacc_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_tu(
@@ -517,7 +517,7 @@ vuint8m4_t test_vmacc_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_tu(
@@ -526,7 +526,7 @@ vuint8m8_t test_vmacc_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_tu(
@@ -535,7 +535,7 @@ vuint8m8_t test_vmacc_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_tu(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_tu(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_tu(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_tu(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_tu(
@@ -580,7 +580,7 @@ vuint16m1_t test_vmacc_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_tu(
@@ -589,7 +589,7 @@ vuint16m1_t test_vmacc_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_tu(
@@ -598,7 +598,7 @@ vuint16m2_t test_vmacc_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_tu(
@@ -607,7 +607,7 @@ vuint16m2_t test_vmacc_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_tu(
@@ -616,7 +616,7 @@ vuint16m4_t test_vmacc_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_tu(
@@ -625,7 +625,7 @@ vuint16m4_t test_vmacc_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_tu(
@@ -634,7 +634,7 @@ vuint16m8_t test_vmacc_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tu(
@@ -643,7 +643,7 @@ vuint16m8_t test_vmacc_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tu(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_tu(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_tu(
@@ -670,7 +670,7 @@ vuint32m1_t test_vmacc_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_tu(
@@ -679,7 +679,7 @@ vuint32m1_t test_vmacc_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_tu(
@@ -688,7 +688,7 @@ vuint32m2_t test_vmacc_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_tu(
@@ -697,7 +697,7 @@ vuint32m2_t test_vmacc_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_tu(
@@ -706,7 +706,7 @@ vuint32m4_t test_vmacc_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_tu(
@@ -715,7 +715,7 @@ vuint32m4_t test_vmacc_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_tu(
@@ -724,7 +724,7 @@ vuint32m8_t test_vmacc_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_tu(
@@ -733,7 +733,7 @@ vuint32m8_t test_vmacc_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_tu(
@@ -742,7 +742,7 @@ vuint64m1_t test_vmacc_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_tu(
@@ -751,7 +751,7 @@ vuint64m1_t test_vmacc_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_tu(
@@ -760,7 +760,7 @@ vuint64m2_t test_vmacc_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_tu(
@@ -769,7 +769,7 @@ vuint64m2_t test_vmacc_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_tu(
@@ -778,7 +778,7 @@ vuint64m4_t test_vmacc_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_tu(
@@ -787,7 +787,7 @@ vuint64m4_t test_vmacc_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_tu(
@@ -796,7 +796,7 @@ vuint64m8_t test_vmacc_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vmacc_tu(vd, rs1, vs2, vl);
}
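// NOTE: the `_tum` overloads below add a mask operand while keeping the
// tail-undisturbed policy. A minimal sketch, assuming <riscv_vector.h> is
// included (`masked_mul_add` is an illustrative name): active elements get
// vd[i] + vs1[i] * vs2[i], the tail keeps the values from `vd`, and
// masked-off body elements follow the mask-agnostic policy (contrast with
// the `_tumu` group further down).
static inline vint8mf8_t masked_mul_add(vbool64_t m, vint8mf8_t acc,
                                        vint8mf8_t a, vint8mf8_t b,
                                        size_t vl) {
  return __riscv_vmacc_tum(m, acc, a, b, vl);
}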
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_tum(
@@ -805,7 +805,7 @@ vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_tum(
@@ -814,7 +814,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_tum(
@@ -823,7 +823,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_tum(
@@ -832,7 +832,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_tum(
@@ -841,7 +841,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_tum(
@@ -850,7 +850,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_tum(
@@ -859,7 +859,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_tum(
@@ -868,7 +868,7 @@ vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_tum(
@@ -877,7 +877,7 @@ vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_tum(
@@ -886,7 +886,7 @@ vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_tum(
@@ -895,7 +895,7 @@ vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_tum(
@@ -904,7 +904,7 @@ vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_tum(
@@ -913,7 +913,7 @@ vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_tum(
@@ -922,7 +922,7 @@ vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_tum(
@@ -931,7 +931,7 @@ vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_tum(
@@ -940,7 +940,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_tum(
@@ -949,7 +949,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_tum(
@@ -958,7 +958,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_tum(
@@ -967,7 +967,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_tum(
@@ -976,7 +976,7 @@ vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_tum(
@@ -985,7 +985,7 @@ vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_tum(
@@ -994,7 +994,7 @@ vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_tum(
@@ -1003,7 +1003,7 @@ vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_tum(
@@ -1012,7 +1012,7 @@ vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_tum(
@@ -1021,7 +1021,7 @@ vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_tum(
@@ -1030,7 +1030,7 @@ vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tum(
@@ -1039,7 +1039,7 @@ vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tum(
@@ -1048,7 +1048,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_tum(
@@ -1057,7 +1057,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_tum(
@@ -1066,7 +1066,7 @@ vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_tum(
@@ -1075,7 +1075,7 @@ vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_tum(
@@ -1084,7 +1084,7 @@ vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_tum(
@@ -1093,7 +1093,7 @@ vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_tum(
@@ -1102,7 +1102,7 @@ vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_tum(
@@ -1111,7 +1111,7 @@ vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_tum(
@@ -1120,7 +1120,7 @@ vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_tum(
@@ -1129,7 +1129,7 @@ vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_tum(
@@ -1138,7 +1138,7 @@ vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_tum(
@@ -1147,7 +1147,7 @@ vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_tum(
@@ -1156,7 +1156,7 @@ vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_tum(
@@ -1165,7 +1165,7 @@ vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_tum(
@@ -1174,7 +1174,7 @@ vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_tum(
@@ -1183,7 +1183,7 @@ vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_tum(
@@ -1192,7 +1192,7 @@ vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_tum(
@@ -1201,7 +1201,7 @@ vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_tum(
@@ -1210,7 +1210,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_tum(
@@ -1219,7 +1219,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_tum(
@@ -1228,7 +1228,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_tum(
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_tum(
@@ -1246,7 +1246,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_tum(
@@ -1255,7 +1255,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_tum(
@@ -1264,7 +1264,7 @@ vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_tum(
@@ -1273,7 +1273,7 @@ vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_tum(
@@ -1282,7 +1282,7 @@ vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_tum(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_tum(
@@ -1300,7 +1300,7 @@ vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_tum(
@@ -1309,7 +1309,7 @@ vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_tum(
@@ -1318,7 +1318,7 @@ vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_tum(
@@ -1327,7 +1327,7 @@ vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_tum(
@@ -1336,7 +1336,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_tum(
@@ -1345,7 +1345,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_tum(
@@ -1354,7 +1354,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_tum(
@@ -1363,7 +1363,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_tum(
@@ -1372,7 +1372,7 @@ vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_tum(
@@ -1381,7 +1381,7 @@ vuint16m1_t test_vmacc_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_tum(
@@ -1390,7 +1390,7 @@ vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_tum(
@@ -1399,7 +1399,7 @@ vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_tum(
@@ -1408,7 +1408,7 @@ vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_tum(
@@ -1417,7 +1417,7 @@ vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_tum(
@@ -1426,7 +1426,7 @@ vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tum(
@@ -1435,7 +1435,7 @@ vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tum(
@@ -1444,7 +1444,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_tum(
@@ -1453,7 +1453,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_tum(
@@ -1462,7 +1462,7 @@ vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_tum(
@@ -1471,7 +1471,7 @@ vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_tum(
@@ -1480,7 +1480,7 @@ vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_tum(
@@ -1489,7 +1489,7 @@ vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_tum(
@@ -1498,7 +1498,7 @@ vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_tum(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_tum(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_tum(
@@ -1525,7 +1525,7 @@ vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_tum(
@@ -1534,7 +1534,7 @@ vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_tum(
@@ -1543,7 +1543,7 @@ vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_tum(
@@ -1552,7 +1552,7 @@ vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_tum(
@@ -1561,7 +1561,7 @@ vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_tum(
@@ -1570,7 +1570,7 @@ vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_tum(
@@ -1579,7 +1579,7 @@ vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_tum(
@@ -1588,7 +1588,7 @@ vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_tumu(
@@ -1597,7 +1597,7 @@ vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_tumu(
@@ -1606,7 +1606,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_tumu(
@@ -1615,7 +1615,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_tumu(
@@ -1624,7 +1624,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_tumu(
@@ -1633,7 +1633,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_tumu(
@@ -1642,7 +1642,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_tumu(
@@ -1651,7 +1651,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_tumu(
@@ -1660,7 +1660,7 @@ vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_tumu(
@@ -1669,7 +1669,7 @@ vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_tumu(
@@ -1678,7 +1678,7 @@ vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_tumu(
@@ -1687,7 +1687,7 @@ vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_tumu(
@@ -1696,7 +1696,7 @@ vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_tumu(
@@ -1705,7 +1705,7 @@ vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_tumu(
@@ -1714,7 +1714,7 @@ vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_tumu(
@@ -1723,7 +1723,7 @@ vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_tumu(
@@ -1732,7 +1732,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_tumu(
@@ -1741,7 +1741,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_tumu(
@@ -1750,7 +1750,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_tumu(
@@ -1759,7 +1759,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_tumu(
@@ -1768,7 +1768,7 @@ vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_tumu(
@@ -1777,7 +1777,7 @@ vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_tumu(
@@ -1786,7 +1786,7 @@ vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_tumu(
@@ -1795,7 +1795,7 @@ vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_tumu(
@@ -1804,7 +1804,7 @@ vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_tumu(
@@ -1813,7 +1813,7 @@ vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_tumu(
@@ -1822,7 +1822,7 @@ vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tumu(
@@ -1831,7 +1831,7 @@ vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tumu(
@@ -1840,7 +1840,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_tumu(
@@ -1849,7 +1849,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_tumu(
@@ -1858,7 +1858,7 @@ vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_tumu(
@@ -1867,7 +1867,7 @@ vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_tumu(
@@ -1876,7 +1876,7 @@ vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_tumu(
@@ -1885,7 +1885,7 @@ vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_tumu(
@@ -1894,7 +1894,7 @@ vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_tumu(
@@ -1903,7 +1903,7 @@ vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_tumu(
@@ -1912,7 +1912,7 @@ vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_tumu(
@@ -1921,7 +1921,7 @@ vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_tumu(
@@ -1930,7 +1930,7 @@ vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_tumu(
@@ -1939,7 +1939,7 @@ vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_tumu(
@@ -1948,7 +1948,7 @@ vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_tumu(
@@ -1957,7 +1957,7 @@ vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_tumu(
@@ -1966,7 +1966,7 @@ vint64m4_t test_vmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_tumu(
@@ -1975,7 +1975,7 @@ vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_tumu(
@@ -1984,7 +1984,7 @@ vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_tumu(
@@ -1993,7 +1993,7 @@ vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_tumu(
@@ -2002,7 +2002,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_tumu(
@@ -2011,7 +2011,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_tumu(
@@ -2020,7 +2020,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_tumu(
@@ -2029,7 +2029,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_tumu(
@@ -2038,7 +2038,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_tumu(
@@ -2047,7 +2047,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_tumu(
@@ -2056,7 +2056,7 @@ vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_tumu(
@@ -2065,7 +2065,7 @@ vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_tumu(
@@ -2074,7 +2074,7 @@ vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_tumu(
@@ -2083,7 +2083,7 @@ vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_tumu(
@@ -2092,7 +2092,7 @@ vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_tumu(
@@ -2101,7 +2101,7 @@ vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_tumu(
@@ -2110,7 +2110,7 @@ vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_tumu(
@@ -2119,7 +2119,7 @@ vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_tumu(
@@ -2128,7 +2128,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_tumu(
@@ -2137,7 +2137,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_tumu(
@@ -2146,7 +2146,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_tumu(
@@ -2155,7 +2155,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_tumu(
@@ -2164,7 +2164,7 @@ vuint16m1_t test_vmacc_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_tumu(
@@ -2173,7 +2173,7 @@ vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_tumu(
@@ -2182,7 +2182,7 @@ vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_tumu(
@@ -2191,7 +2191,7 @@ vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_tumu(
@@ -2200,7 +2200,7 @@ vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_tumu(
@@ -2209,7 +2209,7 @@ vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_tumu(
@@ -2218,7 +2218,7 @@ vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tumu(
@@ -2227,7 +2227,7 @@ vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tumu(
@@ -2236,7 +2236,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_tumu(
@@ -2245,7 +2245,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_tumu(
@@ -2254,7 +2254,7 @@ vuint32m1_t test_vmacc_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_tumu(
@@ -2263,7 +2263,7 @@ vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_tumu(
@@ -2272,7 +2272,7 @@ vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_tumu(
@@ -2281,7 +2281,7 @@ vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_tumu(
@@ -2290,7 +2290,7 @@ vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_tumu(
@@ -2299,7 +2299,7 @@ vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_tumu(
@@ -2308,7 +2308,7 @@ vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_tumu(
@@ -2317,7 +2317,7 @@ vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_tumu(
@@ -2326,7 +2326,7 @@ vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_tumu(
@@ -2335,7 +2335,7 @@ vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_tumu(
@@ -2344,7 +2344,7 @@ vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_tumu(
@@ -2353,7 +2353,7 @@ vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_tumu(
@@ -2362,7 +2362,7 @@ vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_tumu(
@@ -2371,7 +2371,7 @@ vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_tumu(
@@ -2380,7 +2380,7 @@ vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_mu(
@@ -2389,7 +2389,7 @@ vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_mu(
@@ -2398,7 +2398,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_mu(
@@ -2407,7 +2407,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_mu(
@@ -2416,7 +2416,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_mu(
@@ -2425,7 +2425,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_mu(
@@ -2434,7 +2434,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_mu(
@@ -2443,7 +2443,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_mu(
@@ -2452,7 +2452,7 @@ vint8m1_t test_vmacc_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_mu(
@@ -2461,7 +2461,7 @@ vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_mu(
@@ -2470,7 +2470,7 @@ vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_mu(
@@ -2479,7 +2479,7 @@ vint8m2_t test_vmacc_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_mu(
@@ -2488,7 +2488,7 @@ vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_mu(
@@ -2497,7 +2497,7 @@ vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_mu(
@@ -2506,7 +2506,7 @@ vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_mu(
@@ -2515,7 +2515,7 @@ vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_mu(
@@ -2524,7 +2524,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_mu(
@@ -2533,7 +2533,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_mu(
@@ -2542,7 +2542,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_mu(
@@ -2551,7 +2551,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_mu(
@@ -2560,7 +2560,7 @@ vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_mu(
@@ -2569,7 +2569,7 @@ vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_mu(
@@ -2578,7 +2578,7 @@ vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_mu(
@@ -2587,7 +2587,7 @@ vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_mu(
@@ -2596,7 +2596,7 @@ vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_mu(
@@ -2605,7 +2605,7 @@ vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_mu(
@@ -2614,7 +2614,7 @@ vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_mu(
@@ -2623,7 +2623,7 @@ vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_mu(
@@ -2632,7 +2632,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_mu(
@@ -2641,7 +2641,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_mu(
@@ -2650,7 +2650,7 @@ vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_mu(
@@ -2659,7 +2659,7 @@ vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_mu(
@@ -2668,7 +2668,7 @@ vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_mu(
@@ -2677,7 +2677,7 @@ vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_mu(
@@ -2686,7 +2686,7 @@ vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_mu(
@@ -2695,7 +2695,7 @@ vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_mu(
@@ -2704,7 +2704,7 @@ vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_mu(
@@ -2713,7 +2713,7 @@ vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_mu(
@@ -2722,7 +2722,7 @@ vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_mu(
@@ -2731,7 +2731,7 @@ vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_mu(
@@ -2740,7 +2740,7 @@ vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_mu(
@@ -2749,7 +2749,7 @@ vint64m2_t test_vmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_mu(
@@ -2758,7 +2758,7 @@ vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_mu(
@@ -2767,7 +2767,7 @@ vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_mu(
@@ -2776,7 +2776,7 @@ vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_mu(
@@ -2785,7 +2785,7 @@ vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_mu(
@@ -2794,7 +2794,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_mu(
@@ -2803,7 +2803,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_mu(
@@ -2812,7 +2812,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_mu(
@@ -2821,7 +2821,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_mu(
@@ -2830,7 +2830,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_mu(
@@ -2839,7 +2839,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_mu(
@@ -2848,7 +2848,7 @@ vuint8m1_t test_vmacc_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_mu(
@@ -2857,7 +2857,7 @@ vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_mu(
@@ -2866,7 +2866,7 @@ vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_mu(
@@ -2875,7 +2875,7 @@ vuint8m2_t test_vmacc_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_mu(
@@ -2884,7 +2884,7 @@ vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_mu(
@@ -2893,7 +2893,7 @@ vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_mu(
@@ -2902,7 +2902,7 @@ vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_mu(
@@ -2911,7 +2911,7 @@ vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_mu(
@@ -2920,7 +2920,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_mu(
@@ -2929,7 +2929,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_mu(
@@ -2938,7 +2938,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_mu(
@@ -2947,7 +2947,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_mu(
@@ -2956,7 +2956,7 @@ vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_mu(
@@ -2965,7 +2965,7 @@ vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_mu(
@@ -2974,7 +2974,7 @@ vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_mu(
@@ -2983,7 +2983,7 @@ vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_mu(
@@ -2992,7 +2992,7 @@ vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_mu(
@@ -3001,7 +3001,7 @@ vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_mu(
@@ -3010,7 +3010,7 @@ vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_mu(
@@ -3019,7 +3019,7 @@ vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_mu(
@@ -3028,7 +3028,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_mu(
@@ -3037,7 +3037,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_mu(
@@ -3046,7 +3046,7 @@ vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_mu(
@@ -3055,7 +3055,7 @@ vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_mu(
@@ -3064,7 +3064,7 @@ vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_mu(
@@ -3073,7 +3073,7 @@ vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_mu(
@@ -3082,7 +3082,7 @@ vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_mu(
@@ -3091,7 +3091,7 @@ vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_mu(
@@ -3100,7 +3100,7 @@ vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_mu(
@@ -3109,7 +3109,7 @@ vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_mu(
@@ -3118,7 +3118,7 @@ vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_mu(
@@ -3127,7 +3127,7 @@ vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_mu(
@@ -3136,7 +3136,7 @@ vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_mu(
@@ -3145,7 +3145,7 @@ vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_mu(
@@ -3154,7 +3154,7 @@ vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_mu(
@@ -3163,7 +3163,7 @@ vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_mu(
@@ -3172,6 +3172,6 @@ vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl);
}
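
For readers skimming the patch, a minimal sketch of how a renamed overloaded
intrinsic is called after this change. It mirrors the autogenerated tests
above (same operand order and types) and assumes an RVV-enabled toolchain
providing <riscv_vector.h>; the wrapper function name is illustrative only,
not taken from the patch.

#include <riscv_vector.h>

// vd[i] = vs1[i] * vs2[i] + vd[i] for active elements (mask[i] set); the
// "mu" (mask-undisturbed) policy keeps masked-off elements of vd unchanged.
vint32m1_t mul_acc_masked(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1,
                          vint32m1_t vs2, size_t vl) {
  return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl);
}
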
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmadd.c
index b9b94ec7404a..e3ddb8af053d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmadd.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_tu(
@@ -22,7 +22,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_tu(
@@ -31,7 +31,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_tu(
@@ -40,7 +40,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_tu(
@@ -49,7 +49,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_tu(
@@ -58,7 +58,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_tu(
@@ -67,7 +67,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_tu(
@@ -76,7 +76,7 @@ vint8m1_t test_vmadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_tu(
@@ -85,7 +85,7 @@ vint8m1_t test_vmadd_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_tu(
@@ -94,7 +94,7 @@ vint8m2_t test_vmadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_tu(
@@ -103,7 +103,7 @@ vint8m2_t test_vmadd_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_tu(
@@ -112,7 +112,7 @@ vint8m4_t test_vmadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_tu(
@@ -121,7 +121,7 @@ vint8m4_t test_vmadd_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_tu(
@@ -130,7 +130,7 @@ vint8m8_t test_vmadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_tu(
@@ -139,7 +139,7 @@ vint8m8_t test_vmadd_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_tu(
@@ -148,7 +148,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_tu(
@@ -157,7 +157,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_tu(
@@ -166,7 +166,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_tu(
@@ -175,7 +175,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_tu(
@@ -184,7 +184,7 @@ vint16m1_t test_vmadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_tu(
@@ -193,7 +193,7 @@ vint16m1_t test_vmadd_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_tu(
@@ -202,7 +202,7 @@ vint16m2_t test_vmadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_tu(
@@ -211,7 +211,7 @@ vint16m2_t test_vmadd_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_tu(
@@ -220,7 +220,7 @@ vint16m4_t test_vmadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_tu(
@@ -229,7 +229,7 @@ vint16m4_t test_vmadd_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_tu(
@@ -238,7 +238,7 @@ vint16m8_t test_vmadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tu(
@@ -247,7 +247,7 @@ vint16m8_t test_vmadd_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tu(
@@ -256,7 +256,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_tu(
@@ -265,7 +265,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_tu(
@@ -274,7 +274,7 @@ vint32m1_t test_vmadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vmadd_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vmadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_tu(
@@ -301,7 +301,7 @@ vint32m2_t test_vmadd_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_tu(
@@ -310,7 +310,7 @@ vint32m4_t test_vmadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_tu(
@@ -319,7 +319,7 @@ vint32m4_t test_vmadd_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_tu(
@@ -328,7 +328,7 @@ vint32m8_t test_vmadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_tu(
@@ -337,7 +337,7 @@ vint32m8_t test_vmadd_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_tu(
@@ -346,7 +346,7 @@ vint64m1_t test_vmadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_tu(
@@ -355,7 +355,7 @@ vint64m1_t test_vmadd_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_tu(
@@ -364,7 +364,7 @@ vint64m2_t test_vmadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_tu(
@@ -373,7 +373,7 @@ vint64m2_t test_vmadd_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_tu(
@@ -382,7 +382,7 @@ vint64m4_t test_vmadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_tu(
@@ -391,7 +391,7 @@ vint64m4_t test_vmadd_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_tu(
@@ -400,7 +400,7 @@ vint64m8_t test_vmadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_tu(
@@ -409,7 +409,7 @@ vint64m8_t test_vmadd_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_tu(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_tu(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_tu(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_tu(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_tu(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_tu(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_tu(
@@ -472,7 +472,7 @@ vuint8m1_t test_vmadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_tu(
@@ -481,7 +481,7 @@ vuint8m1_t test_vmadd_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_tu(
@@ -490,7 +490,7 @@ vuint8m2_t test_vmadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_tu(
@@ -499,7 +499,7 @@ vuint8m2_t test_vmadd_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_tu(
@@ -508,7 +508,7 @@ vuint8m4_t test_vmadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_tu(
@@ -517,7 +517,7 @@ vuint8m4_t test_vmadd_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_tu(
@@ -526,7 +526,7 @@ vuint8m8_t test_vmadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_tu(
@@ -535,7 +535,7 @@ vuint8m8_t test_vmadd_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_tu(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_tu(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_tu(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_tu(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_tu(
@@ -580,7 +580,7 @@ vuint16m1_t test_vmadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_tu(
@@ -589,7 +589,7 @@ vuint16m1_t test_vmadd_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_tu(
@@ -598,7 +598,7 @@ vuint16m2_t test_vmadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_tu(
@@ -607,7 +607,7 @@ vuint16m2_t test_vmadd_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_tu(
@@ -616,7 +616,7 @@ vuint16m4_t test_vmadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_tu(
@@ -625,7 +625,7 @@ vuint16m4_t test_vmadd_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_tu(
@@ -634,7 +634,7 @@ vuint16m8_t test_vmadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tu(
@@ -643,7 +643,7 @@ vuint16m8_t test_vmadd_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tu(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_tu(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_tu(
@@ -670,7 +670,7 @@ vuint32m1_t test_vmadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_tu(
@@ -679,7 +679,7 @@ vuint32m1_t test_vmadd_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_tu(
@@ -688,7 +688,7 @@ vuint32m2_t test_vmadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_tu(
@@ -697,7 +697,7 @@ vuint32m2_t test_vmadd_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_tu(
@@ -706,7 +706,7 @@ vuint32m4_t test_vmadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_tu(
@@ -715,7 +715,7 @@ vuint32m4_t test_vmadd_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_tu(
@@ -724,7 +724,7 @@ vuint32m8_t test_vmadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_tu(
@@ -733,7 +733,7 @@ vuint32m8_t test_vmadd_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_tu(
@@ -742,7 +742,7 @@ vuint64m1_t test_vmadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_tu(
@@ -751,7 +751,7 @@ vuint64m1_t test_vmadd_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_tu(
@@ -760,7 +760,7 @@ vuint64m2_t test_vmadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_tu(
@@ -769,7 +769,7 @@ vuint64m2_t test_vmadd_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_tu(
@@ -778,7 +778,7 @@ vuint64m4_t test_vmadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_tu(
@@ -787,7 +787,7 @@ vuint64m4_t test_vmadd_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmadd_tu(vd, vs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_tu(
@@ -796,7 +796,7 @@ vuint64m8_t test_vmadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmadd_tu(vd, rs1, vs2, vl);
+ return __riscv_vmadd_tu(vd, rs1, vs2, vl);
}
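
As above, a brief sketch (illustrative, not part of the diff) of the unmasked
tail-undisturbed form exercised by the tests in this group; everything except
the intrinsic call itself is an assumption. Unlike vmacc, vmadd overwrites
the multiplicand: vd[i] = vd[i] * vs1[i] + vs2[i].

#include <riscv_vector.h>

// Elements at indices >= vl follow the "tu" (tail-undisturbed) policy and
// keep their previous values from vd.
vint32m1_t madd_tail_undisturbed(vint32m1_t vd, vint32m1_t vs1,
                                 vint32m1_t vs2, size_t vl) {
  return __riscv_vmadd_tu(vd, vs1, vs2, vl);
}
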
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_tum(
@@ -805,7 +805,7 @@ vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_tum(
@@ -814,7 +814,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_tum(
@@ -823,7 +823,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_tum(
@@ -832,7 +832,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_tum(
@@ -841,7 +841,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_tum(
@@ -850,7 +850,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_tum(
@@ -859,7 +859,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_tum(
@@ -868,7 +868,7 @@ vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_tum(
@@ -877,7 +877,7 @@ vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_tum(
@@ -886,7 +886,7 @@ vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_tum(
@@ -895,7 +895,7 @@ vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_tum(
@@ -904,7 +904,7 @@ vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_tum(
@@ -913,7 +913,7 @@ vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_tum(
@@ -922,7 +922,7 @@ vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_tum(
@@ -931,7 +931,7 @@ vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_tum(
@@ -940,7 +940,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_tum(
@@ -949,7 +949,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_tum(
@@ -958,7 +958,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_tum(
@@ -967,7 +967,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_tum(
@@ -976,7 +976,7 @@ vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_tum(
@@ -985,7 +985,7 @@ vint16m1_t test_vmadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_tum(
@@ -994,7 +994,7 @@ vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_tum(
@@ -1003,7 +1003,7 @@ vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_tum(
@@ -1012,7 +1012,7 @@ vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_tum(
@@ -1021,7 +1021,7 @@ vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_tum(
@@ -1030,7 +1030,7 @@ vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tum(
@@ -1039,7 +1039,7 @@ vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tum(
@@ -1048,7 +1048,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_tum(
@@ -1057,7 +1057,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_tum(
@@ -1066,7 +1066,7 @@ vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_tum(
@@ -1075,7 +1075,7 @@ vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_tum(
@@ -1084,7 +1084,7 @@ vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_tum(
@@ -1093,7 +1093,7 @@ vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_tum(
@@ -1102,7 +1102,7 @@ vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_tum(
@@ -1111,7 +1111,7 @@ vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_tum(
@@ -1120,7 +1120,7 @@ vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_tum(
@@ -1129,7 +1129,7 @@ vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_tum(
@@ -1138,7 +1138,7 @@ vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_tum(
@@ -1147,7 +1147,7 @@ vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_tum(
@@ -1156,7 +1156,7 @@ vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_tum(
@@ -1165,7 +1165,7 @@ vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_tum(
@@ -1174,7 +1174,7 @@ vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_tum(
@@ -1183,7 +1183,7 @@ vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_tum(
@@ -1192,7 +1192,7 @@ vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_tum(
@@ -1201,7 +1201,7 @@ vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_tum(
@@ -1210,7 +1210,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_tum(
@@ -1219,7 +1219,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_tum(
@@ -1228,7 +1228,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_tum(
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_tum(
@@ -1246,7 +1246,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_tum(
@@ -1255,7 +1255,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_tum(
@@ -1264,7 +1264,7 @@ vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_tum(
@@ -1273,7 +1273,7 @@ vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_tum(
@@ -1282,7 +1282,7 @@ vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_tum(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_tum(
@@ -1300,7 +1300,7 @@ vuint8m4_t test_vmadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_tum(
@@ -1309,7 +1309,7 @@ vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_tum(
@@ -1318,7 +1318,7 @@ vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_tum(
@@ -1327,7 +1327,7 @@ vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_tum(
@@ -1336,7 +1336,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_tum(
@@ -1345,7 +1345,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_tum(
@@ -1354,7 +1354,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_tum(
@@ -1363,7 +1363,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_tum(
@@ -1372,7 +1372,7 @@ vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_tum(
@@ -1381,7 +1381,7 @@ vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_tum(
@@ -1390,7 +1390,7 @@ vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_tum(
@@ -1399,7 +1399,7 @@ vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_tum(
@@ -1408,7 +1408,7 @@ vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_tum(
@@ -1417,7 +1417,7 @@ vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_tum(
@@ -1426,7 +1426,7 @@ vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tum(
@@ -1435,7 +1435,7 @@ vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tum(
@@ -1444,7 +1444,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_tum(
@@ -1453,7 +1453,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_tum(
@@ -1462,7 +1462,7 @@ vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_tum(
@@ -1471,7 +1471,7 @@ vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_tum(
@@ -1480,7 +1480,7 @@ vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_tum(
@@ -1489,7 +1489,7 @@ vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_tum(
@@ -1498,7 +1498,7 @@ vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_tum(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vmadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_tum(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_tum(
@@ -1525,7 +1525,7 @@ vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_tum(
@@ -1534,7 +1534,7 @@ vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_tum(
@@ -1543,7 +1543,7 @@ vuint64m1_t test_vmadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_tum(
@@ -1552,7 +1552,7 @@ vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_tum(
@@ -1561,7 +1561,7 @@ vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_tum(
@@ -1570,7 +1570,7 @@ vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_tum(
@@ -1579,7 +1579,7 @@ vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_tum(
@@ -1588,7 +1588,7 @@ vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmadd_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_tumu(
@@ -1597,7 +1597,7 @@ vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_tumu(
@@ -1606,7 +1606,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_tumu(
@@ -1615,7 +1615,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_tumu(
@@ -1624,7 +1624,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_tumu(
@@ -1633,7 +1633,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_tumu(
@@ -1642,7 +1642,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_tumu(
@@ -1651,7 +1651,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_tumu(
@@ -1660,7 +1660,7 @@ vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_tumu(
@@ -1669,7 +1669,7 @@ vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_tumu(
@@ -1678,7 +1678,7 @@ vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_tumu(
@@ -1687,7 +1687,7 @@ vint8m2_t test_vmadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_tumu(
@@ -1696,7 +1696,7 @@ vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_tumu(
@@ -1705,7 +1705,7 @@ vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_tumu(
@@ -1714,7 +1714,7 @@ vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_tumu(
@@ -1723,7 +1723,7 @@ vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_tumu(
@@ -1732,7 +1732,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_tumu(
@@ -1741,7 +1741,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_tumu(
@@ -1750,7 +1750,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_tumu(
@@ -1759,7 +1759,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_tumu(
@@ -1768,7 +1768,7 @@ vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_tumu(
@@ -1777,7 +1777,7 @@ vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_tumu(
@@ -1786,7 +1786,7 @@ vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_tumu(
@@ -1795,7 +1795,7 @@ vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_tumu(
@@ -1804,7 +1804,7 @@ vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_tumu(
@@ -1813,7 +1813,7 @@ vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_tumu(
@@ -1822,7 +1822,7 @@ vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tumu(
@@ -1831,7 +1831,7 @@ vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tumu(
@@ -1840,7 +1840,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_tumu(
@@ -1849,7 +1849,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_tumu(
@@ -1858,7 +1858,7 @@ vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_tumu(
@@ -1867,7 +1867,7 @@ vint32m1_t test_vmadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_tumu(
@@ -1876,7 +1876,7 @@ vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_tumu(
@@ -1885,7 +1885,7 @@ vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_tumu(
@@ -1894,7 +1894,7 @@ vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_tumu(
@@ -1903,7 +1903,7 @@ vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_tumu(
@@ -1912,7 +1912,7 @@ vint32m8_t test_vmadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_tumu(
@@ -1921,7 +1921,7 @@ vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_tumu(
@@ -1930,7 +1930,7 @@ vint64m1_t test_vmadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_tumu(
@@ -1939,7 +1939,7 @@ vint64m1_t test_vmadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_tumu(
@@ -1948,7 +1948,7 @@ vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_tumu(
@@ -1957,7 +1957,7 @@ vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_tumu(
@@ -1966,7 +1966,7 @@ vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_tumu(
@@ -1975,7 +1975,7 @@ vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_tumu(
@@ -1984,7 +1984,7 @@ vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_tumu(
@@ -1993,7 +1993,7 @@ vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_tumu(
@@ -2002,7 +2002,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_tumu(
@@ -2011,7 +2011,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_tumu(
@@ -2020,7 +2020,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_tumu(
@@ -2029,7 +2029,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_tumu(
@@ -2038,7 +2038,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_tumu(
@@ -2047,7 +2047,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_tumu(
@@ -2056,7 +2056,7 @@ vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_tumu(
@@ -2065,7 +2065,7 @@ vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_tumu(
@@ -2074,7 +2074,7 @@ vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_tumu(
@@ -2083,7 +2083,7 @@ vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_tumu(
@@ -2092,7 +2092,7 @@ vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_tumu(
@@ -2101,7 +2101,7 @@ vuint8m4_t test_vmadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_tumu(
@@ -2110,7 +2110,7 @@ vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_tumu(
@@ -2119,7 +2119,7 @@ vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_tumu(
@@ -2128,7 +2128,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_tumu(
@@ -2137,7 +2137,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_tumu(
@@ -2146,7 +2146,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_tumu(
@@ -2155,7 +2155,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_tumu(
@@ -2164,7 +2164,7 @@ vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_tumu(
@@ -2173,7 +2173,7 @@ vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_tumu(
@@ -2182,7 +2182,7 @@ vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_tumu(
@@ -2191,7 +2191,7 @@ vuint16m2_t test_vmadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_tumu(
@@ -2200,7 +2200,7 @@ vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_tumu(
@@ -2209,7 +2209,7 @@ vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_tumu(
@@ -2218,7 +2218,7 @@ vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tumu(
@@ -2227,7 +2227,7 @@ vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tumu(
@@ -2236,7 +2236,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_tumu(
@@ -2245,7 +2245,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_tumu(
@@ -2254,7 +2254,7 @@ vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_tumu(
@@ -2263,7 +2263,7 @@ vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_tumu(
@@ -2272,7 +2272,7 @@ vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_tumu(
@@ -2281,7 +2281,7 @@ vuint32m2_t test_vmadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_tumu(
@@ -2290,7 +2290,7 @@ vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_tumu(
@@ -2299,7 +2299,7 @@ vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_tumu(
@@ -2308,7 +2308,7 @@ vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_tumu(
@@ -2317,7 +2317,7 @@ vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_tumu(
@@ -2326,7 +2326,7 @@ vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_tumu(
@@ -2335,7 +2335,7 @@ vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_tumu(
@@ -2344,7 +2344,7 @@ vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_tumu(
@@ -2353,7 +2353,7 @@ vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_tumu(
@@ -2362,7 +2362,7 @@ vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_tumu(
@@ -2371,7 +2371,7 @@ vuint64m4_t test_vmadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_tumu(
@@ -2380,7 +2380,7 @@ vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmadd_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_mu(
@@ -2389,7 +2389,7 @@ vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_mu(
@@ -2398,7 +2398,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_mu(
@@ -2407,7 +2407,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_mu(
@@ -2416,7 +2416,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_mu(
@@ -2425,7 +2425,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_mu(
@@ -2434,7 +2434,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_mu(
@@ -2443,7 +2443,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_mu(
@@ -2452,7 +2452,7 @@ vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_mu(
@@ -2461,7 +2461,7 @@ vint8m1_t test_vmadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_mu(
@@ -2470,7 +2470,7 @@ vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_mu(
@@ -2479,7 +2479,7 @@ vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_mu(
@@ -2488,7 +2488,7 @@ vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_mu(
@@ -2497,7 +2497,7 @@ vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_mu(
@@ -2506,7 +2506,7 @@ vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_mu(
@@ -2515,7 +2515,7 @@ vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_mu(
@@ -2524,7 +2524,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_mu(
@@ -2533,7 +2533,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_mu(
@@ -2542,7 +2542,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_mu(
@@ -2551,7 +2551,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_mu(
@@ -2560,7 +2560,7 @@ vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_mu(
@@ -2569,7 +2569,7 @@ vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_mu(
@@ -2578,7 +2578,7 @@ vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_mu(
@@ -2587,7 +2587,7 @@ vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_mu(
@@ -2596,7 +2596,7 @@ vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_mu(
@@ -2605,7 +2605,7 @@ vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_mu(
@@ -2614,7 +2614,7 @@ vint16m8_t test_vmadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_mu(
@@ -2623,7 +2623,7 @@ vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_mu(
@@ -2632,7 +2632,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_mu(
@@ -2641,7 +2641,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_mu(
@@ -2650,7 +2650,7 @@ vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_mu(
@@ -2659,7 +2659,7 @@ vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_mu(
@@ -2668,7 +2668,7 @@ vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_mu(
@@ -2677,7 +2677,7 @@ vint32m2_t test_vmadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_mu(
@@ -2686,7 +2686,7 @@ vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_mu(
@@ -2695,7 +2695,7 @@ vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_mu(
@@ -2704,7 +2704,7 @@ vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_mu(
@@ -2713,7 +2713,7 @@ vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_mu(
@@ -2722,7 +2722,7 @@ vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_mu(
@@ -2731,7 +2731,7 @@ vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_mu(
@@ -2740,7 +2740,7 @@ vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_mu(
@@ -2749,7 +2749,7 @@ vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_mu(
@@ -2758,7 +2758,7 @@ vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_mu(
@@ -2767,7 +2767,7 @@ vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_mu(
@@ -2776,7 +2776,7 @@ vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_mu(
@@ -2785,7 +2785,7 @@ vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_mu(
@@ -2794,7 +2794,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_mu(
@@ -2803,7 +2803,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_mu(
@@ -2812,7 +2812,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_mu(
@@ -2821,7 +2821,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_mu(
@@ -2830,7 +2830,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_mu(
@@ -2839,7 +2839,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_mu(
@@ -2848,7 +2848,7 @@ vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_mu(
@@ -2857,7 +2857,7 @@ vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_mu(
@@ -2866,7 +2866,7 @@ vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_mu(
@@ -2875,7 +2875,7 @@ vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_mu(
@@ -2884,7 +2884,7 @@ vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_mu(
@@ -2893,7 +2893,7 @@ vuint8m4_t test_vmadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_mu(
@@ -2902,7 +2902,7 @@ vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_mu(
@@ -2911,7 +2911,7 @@ vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_mu(
@@ -2920,7 +2920,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_mu(
@@ -2929,7 +2929,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_mu(
@@ -2938,7 +2938,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_mu(
@@ -2947,7 +2947,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_mu(
@@ -2956,7 +2956,7 @@ vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_mu(
@@ -2965,7 +2965,7 @@ vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_mu(
@@ -2974,7 +2974,7 @@ vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_mu(
@@ -2983,7 +2983,7 @@ vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_mu(
@@ -2992,7 +2992,7 @@ vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_mu(
@@ -3001,7 +3001,7 @@ vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_mu(
@@ -3010,7 +3010,7 @@ vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_mu(
@@ -3019,7 +3019,7 @@ vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_mu(
@@ -3028,7 +3028,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_mu(
@@ -3037,7 +3037,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_mu(
@@ -3046,7 +3046,7 @@ vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_mu(
@@ -3055,7 +3055,7 @@ vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_mu(
@@ -3064,7 +3064,7 @@ vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_mu(
@@ -3073,7 +3073,7 @@ vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_mu(
@@ -3082,7 +3082,7 @@ vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_mu(
@@ -3091,7 +3091,7 @@ vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_mu(
@@ -3100,7 +3100,7 @@ vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_mu(
@@ -3109,7 +3109,7 @@ vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_mu(
@@ -3118,7 +3118,7 @@ vuint64m1_t test_vmadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_mu(
@@ -3127,7 +3127,7 @@ vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_mu(
@@ -3136,7 +3136,7 @@ vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_mu(
@@ -3145,7 +3145,7 @@ vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_mu(
@@ -3154,7 +3154,7 @@ vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_mu(
@@ -3163,7 +3163,7 @@ vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_mu(
@@ -3172,6 +3172,6 @@ vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vmadd_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmax.c
index 90f507edfd8d..45273ee43631 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmax.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmax_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmax_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vmax_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vmax_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vmax_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmax_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmax_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vmax_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vmax_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmax_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vmax_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vmax_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vmax_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vmax_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vmax_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmax_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vmax_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmax_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmax_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
}
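(The vmax.c hunks above and the vmaxu.c hunks below apply the same mechanical rename to every policy-variant overload: _tu, _tum, _tumu, and _mu. As a minimal sketch of how calling code reads after the rename, assuming <riscv_vector.h> and a V-extension target, the hypothetical wrapper below uses only a type combination and the overloaded __riscv_vmaxu_tu call signature that already appear in these tests; it is not part of this commit.)

#include <riscv_vector.h>

// Hypothetical wrapper: element-wise unsigned max of a and b over the
// first vl elements; tail elements keep their values from maskedoff
// (tail-undisturbed policy). The overload resolves on the operand types.
vuint32m1_t max_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t a,
                         vuint32m1_t b, size_t vl) {
  return __riscv_vmaxu_tu(maskedoff, a, b, vl);
}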
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmaxu.c
index 435535635c2b..02447a8d6253 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmaxu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmaxu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vmaxu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vmaxu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmaxu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmaxu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmaxu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl);
}
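The hunks above complete the rename for every overloaded vmaxu policy variant; the file below applies the same mechanical rename to vmerge. As a minimal sketch of the new spelling at a call site (the wrapper name umax_tumu_example and its use are illustrative, not part of the patch; the intrinsic itself is exactly the one exercised by the tests above), something like the following should compile with an RVV-enabled Clang, e.g. -march=rv64gcv:

#include <riscv_vector.h>

// The overloaded form resolves on the argument types; the _tumu suffix
// selects the tail-undisturbed, mask-undisturbed policy, so tail elements
// and masked-off elements are carried over from `maskedoff`.
vuint32m1_t umax_tumu_example(vbool32_t mask, vuint32m1_t maskedoff,
                              vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  // Before this patch the same call was spelled vmaxu_tumu(...).
  return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl);
}
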
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
index 0504f2c376ef..a98d2962f29e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu(
@@ -22,7 +22,7 @@ vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu(
@@ -31,7 +31,7 @@ vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu(
@@ -40,7 +40,7 @@ vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu(
@@ -49,7 +49,7 @@ vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu(
@@ -58,7 +58,7 @@ vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu(
@@ -67,7 +67,7 @@ vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu(
@@ -76,7 +76,7 @@ vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu(
@@ -85,7 +85,7 @@ vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu(
@@ -94,7 +94,7 @@ vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu(
@@ -103,7 +103,7 @@ vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu(
@@ -112,7 +112,7 @@ vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu(
@@ -121,7 +121,7 @@ vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu(
@@ -130,7 +130,7 @@ vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu(
@@ -139,7 +139,7 @@ vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_tu(
@@ -148,7 +148,7 @@ vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_tu(
@@ -157,7 +157,7 @@ vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, in
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu(
@@ -166,7 +166,7 @@ vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu(
@@ -175,7 +175,7 @@ vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, in
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu(
@@ -184,7 +184,7 @@ vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu(
@@ -193,7 +193,7 @@ vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_tu(
@@ -202,7 +202,7 @@ vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu(
@@ -211,7 +211,7 @@ vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu(
@@ -220,7 +220,7 @@ vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu(
@@ -229,7 +229,7 @@ vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu(
@@ -238,7 +238,7 @@ vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu(
@@ -247,7 +247,7 @@ vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu(
@@ -256,7 +256,7 @@ vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu(
@@ -265,7 +265,7 @@ vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, in
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu(
@@ -274,7 +274,7 @@ vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu(
@@ -301,7 +301,7 @@ vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu(
@@ -310,7 +310,7 @@ vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu(
@@ -319,7 +319,7 @@ vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu(
@@ -328,7 +328,7 @@ vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu(
@@ -337,7 +337,7 @@ vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu(
@@ -346,7 +346,7 @@ vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu(
@@ -355,7 +355,7 @@ vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu(
@@ -364,7 +364,7 @@ vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu(
@@ -373,7 +373,7 @@ vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu(
@@ -382,7 +382,7 @@ vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu(
@@ -391,7 +391,7 @@ vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu(
@@ -400,7 +400,7 @@ vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu(
@@ -409,7 +409,7 @@ vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu(
@@ -472,7 +472,7 @@ vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu(
@@ -481,7 +481,7 @@ vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu(
@@ -490,7 +490,7 @@ vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu(
@@ -499,7 +499,7 @@ vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu(
@@ -508,7 +508,7 @@ vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu(
@@ -517,7 +517,7 @@ vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu(
@@ -526,7 +526,7 @@ vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu(
@@ -535,7 +535,7 @@ vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu(
@@ -580,7 +580,7 @@ vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu(
@@ -589,7 +589,7 @@ vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu(
@@ -598,7 +598,7 @@ vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu(
@@ -607,7 +607,7 @@ vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu(
@@ -616,7 +616,7 @@ vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu(
@@ -625,7 +625,7 @@ vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu(
@@ -634,7 +634,7 @@ vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
@@ -643,7 +643,7 @@ vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu(
@@ -670,7 +670,7 @@ vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu(
@@ -679,7 +679,7 @@ vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu(
@@ -688,7 +688,7 @@ vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu(
@@ -697,7 +697,7 @@ vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu(
@@ -706,7 +706,7 @@ vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu(
@@ -715,7 +715,7 @@ vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu(
@@ -724,7 +724,7 @@ vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_tu(
@@ -733,7 +733,7 @@ vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu(
@@ -742,7 +742,7 @@ vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu(
@@ -751,7 +751,7 @@ vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu(
@@ -760,7 +760,7 @@ vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu(
@@ -769,7 +769,7 @@ vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu(
@@ -778,7 +778,7 @@ vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu(
@@ -787,7 +787,7 @@ vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu(
@@ -796,7 +796,7 @@ vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu(
@@ -805,7 +805,7 @@ vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu(
@@ -814,7 +814,7 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu(
@@ -823,7 +823,7 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu(
@@ -832,7 +832,7 @@ vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu(
@@ -841,7 +841,7 @@ vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu(
@@ -850,7 +850,7 @@ vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
@@ -859,7 +859,7 @@ vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu(
@@ -868,7 +868,7 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu(
@@ -877,7 +877,7 @@ vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu(
@@ -886,7 +886,7 @@ vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu(
@@ -895,7 +895,7 @@ vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu(
@@ -904,7 +904,7 @@ vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu(
@@ -913,7 +913,7 @@ vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu(
@@ -922,7 +922,7 @@ vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu(
@@ -931,6 +931,6 @@ vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
- return vmerge_tu(maskedoff, op1, op2, mask, vl);
+ return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
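(Editorial note, not part of the generated patch: the hunks above change only the call spelling; the overloaded signature is unchanged. As a minimal sketch of what a caller looks like after the rename, assuming a toolchain whose riscv_vector.h ships the prefixed overloads and using a hypothetical wrapper name, the tail-undisturbed merge now reads:)

    #include <riscv_vector.h>  /* declares the overloaded __riscv_* intrinsics */
    #include <stddef.h>

    /* Hypothetical wrapper: blend a and b under mask; tail elements past vl
       keep the values already in maskedoff (tail-undisturbed, _tu suffix).
       Signature taken verbatim from test_vmerge_vvm_u32m1_tu above. */
    vuint32m1_t blend_u32m1(vuint32m1_t maskedoff, vuint32m1_t a,
                            vuint32m1_t b, vbool32_t mask, size_t vl) {
      return __riscv_vmerge_tu(maskedoff, a, b, mask, vl);
    }

(Built with the vector extension enabled, overload resolution picks the vuint32m1_t variant from the argument types, exactly as in the tests.)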
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq.c
index 1c3065cff34b..4a2dca3d0dad 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmfeq_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmfeq_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_mu(
@@ -130,7 +130,7 @@ vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_mu(
@@ -139,7 +139,7 @@ vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_mu(
@@ -148,7 +148,7 @@ vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_mu(
@@ -157,7 +157,7 @@ vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_mu(
@@ -166,7 +166,7 @@ vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_mu(
@@ -175,7 +175,7 @@ vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_mu(
@@ -184,7 +184,7 @@ vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_mu(
@@ -193,7 +193,7 @@ vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_mu(
@@ -202,7 +202,7 @@ vbool4_t test_vmfeq_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_mu(
@@ -211,7 +211,7 @@ vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_mu(
@@ -220,7 +220,7 @@ vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_mu(
@@ -229,7 +229,7 @@ vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_mu(
@@ -238,7 +238,7 @@ vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_mu(
@@ -247,7 +247,7 @@ vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_mu(
@@ -256,7 +256,7 @@ vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_mu(
@@ -265,7 +265,7 @@ vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_mu(
@@ -274,6 +274,6 @@ vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfeq_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfeq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
}
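(Editorial note, not part of the generated patch: for the comparison intrinsics the _mu overloads are mask-undisturbed, so inactive bits of the result mask are taken from maskedoff. A hedged sketch of the renamed spelling, with a hypothetical helper name and the signature copied from test_vmfeq_vv_f32m1_b32_mu above:)

    #include <riscv_vector.h>
    #include <stddef.h>

    /* Hypothetical helper: result bit i is (op1[i] == op2[i]) where mask is
       active; where it is inactive, the bit is kept from maskedoff (_mu). */
    vbool32_t eq_active_f32m1(vbool32_t mask, vbool32_t maskedoff,
                              vfloat32m1_t op1, vfloat32m1_t op2,
                              size_t vl) {
      return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
    }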
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge.c
index e24444a5a47d..8dad2bf74128 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_mu(
@@ -130,7 +130,7 @@ vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32_mu(
@@ -139,7 +139,7 @@ vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_mu(
@@ -148,7 +148,7 @@ vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16_mu(
@@ -157,7 +157,7 @@ vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_mu(
@@ -166,7 +166,7 @@ vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8_mu(
@@ -175,7 +175,7 @@ vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_mu(
@@ -184,7 +184,7 @@ vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4_mu(
@@ -193,7 +193,7 @@ vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_mu(
@@ -202,7 +202,7 @@ vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfge_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64_mu(
@@ -211,7 +211,7 @@ vbool4_t test_vmfge_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_mu(
@@ -220,7 +220,7 @@ vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32_mu(
@@ -229,7 +229,7 @@ vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_mu(
@@ -238,7 +238,7 @@ vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16_mu(
@@ -247,7 +247,7 @@ vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_mu(
@@ -256,7 +256,7 @@ vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8_mu(
@@ -265,7 +265,7 @@ vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_mu(
@@ -274,6 +274,6 @@ vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfge_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
}
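(Editorial note, not part of the generated patch: as the vv/vf test pairs above show, one overloaded name covers both operand forms; when op2 is a scalar, overload resolution selects the vector-scalar (vf) variant. Illustrative only, with a hypothetical wrapper name and the signature copied from test_vmfge_vf_f32m1_b32_mu above:)

    #include <riscv_vector.h>
    #include <stddef.h>

    /* Hypothetical helper: same overloaded __riscv_vmfge_mu name as the
       vv form, but the scalar float second operand selects the vf form. */
    vbool32_t ge_scalar_f32m1(vbool32_t mask, vbool32_t maskedoff,
                              vfloat32m1_t op1, float op2, size_t vl) {
      return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
    }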
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt.c
index c13214784741..155f2bdd385d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_mu(
@@ -130,7 +130,7 @@ vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32_mu(
@@ -139,7 +139,7 @@ vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_mu(
@@ -148,7 +148,7 @@ vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16_mu(
@@ -157,7 +157,7 @@ vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_mu(
@@ -166,7 +166,7 @@ vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8_mu(
@@ -175,7 +175,7 @@ vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_mu(
@@ -184,7 +184,7 @@ vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4_mu(
@@ -193,7 +193,7 @@ vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_mu(
@@ -202,7 +202,7 @@ vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64_mu(
@@ -211,7 +211,7 @@ vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_mu(
@@ -220,7 +220,7 @@ vbool64_t test_vmfgt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32_mu(
@@ -229,7 +229,7 @@ vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_mu(
@@ -238,7 +238,7 @@ vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16_mu(
@@ -247,7 +247,7 @@ vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_mu(
@@ -256,7 +256,7 @@ vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8_mu(
@@ -265,7 +265,7 @@ vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_mu(
@@ -274,6 +274,6 @@ vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfgt_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle.c
index 848b7c3c5a5d..3dd6307f3691 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_mu(
@@ -130,7 +130,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_mu(
@@ -139,7 +139,7 @@ vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_mu(
@@ -148,7 +148,7 @@ vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_mu(
@@ -157,7 +157,7 @@ vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_mu(
@@ -166,7 +166,7 @@ vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_mu(
@@ -175,7 +175,7 @@ vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_mu(
@@ -184,7 +184,7 @@ vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_mu(
@@ -193,7 +193,7 @@ vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_mu(
@@ -202,7 +202,7 @@ vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_mu(
@@ -211,7 +211,7 @@ vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_mu(
@@ -220,7 +220,7 @@ vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_mu(
@@ -229,7 +229,7 @@ vbool64_t test_vmfle_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_mu(
@@ -238,7 +238,7 @@ vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_mu(
@@ -247,7 +247,7 @@ vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_mu(
@@ -256,7 +256,7 @@ vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_mu(
@@ -265,7 +265,7 @@ vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_mu(
@@ -274,6 +274,6 @@ vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt.c
index 1311e8331284..4f792bdeb295 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmflt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmflt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_mu(
@@ -130,7 +130,7 @@ vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_mu(
@@ -139,7 +139,7 @@ vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_mu(
@@ -148,7 +148,7 @@ vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_mu(
@@ -157,7 +157,7 @@ vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_mu(
@@ -166,7 +166,7 @@ vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_mu(
@@ -175,7 +175,7 @@ vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_mu(
@@ -184,7 +184,7 @@ vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_mu(
@@ -193,7 +193,7 @@ vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_mu(
@@ -202,7 +202,7 @@ vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_mu(
@@ -211,7 +211,7 @@ vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_mu(
@@ -220,7 +220,7 @@ vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_mu(
@@ -229,7 +229,7 @@ vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_mu(
@@ -238,7 +238,7 @@ vbool32_t test_vmflt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_mu(
@@ -247,7 +247,7 @@ vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_mu(
@@ -256,7 +256,7 @@ vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_mu(
@@ -265,7 +265,7 @@ vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_mu(
@@ -274,6 +274,6 @@ vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmflt_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vmflt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne.c
index 68a47e6cc05b..e688a04bc6d2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_mu(
@@ -130,7 +130,7 @@ vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_mu(
@@ -139,7 +139,7 @@ vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_mu(
@@ -148,7 +148,7 @@ vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_mu(
@@ -157,7 +157,7 @@ vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_mu(
@@ -166,7 +166,7 @@ vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_mu(
@@ -175,7 +175,7 @@ vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_mu(
@@ -184,7 +184,7 @@ vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_mu(
@@ -193,7 +193,7 @@ vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_mu(
@@ -202,7 +202,7 @@ vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_mu(
@@ -211,7 +211,7 @@ vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_mu(
@@ -220,7 +220,7 @@ vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_mu(
@@ -229,7 +229,7 @@ vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_mu(
@@ -238,7 +238,7 @@ vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfne_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_mu(
@@ -247,7 +247,7 @@ vbool32_t test_vmfne_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_mu(
@@ -256,7 +256,7 @@ vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_mu(
@@ -265,7 +265,7 @@ vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_mu(
@@ -274,6 +274,6 @@ vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfne_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmin.c
index 0d40440b3bbc..cc9f59eedafd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmin.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmin_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmin_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmin_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vmin_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmin_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vmin_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vmin_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vmin_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vmin_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vmin_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmin_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmin_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vmin_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vmin_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vmin_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vmin_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vmin_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmin_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vmin_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vmin_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vmin_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vmin_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vmin_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vmin_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmin_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vmin_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vmin_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vmin_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vmin_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmin_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
}
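
For readers skimming the hunks above: the change to every overloaded RVV intrinsic in these autogenerated tests is purely a rename — `vmin_tu`/`vmin_tum`/`vmin_tumu`/`vmin_mu` become `__riscv_vmin_tu` and so on, with the argument order (mask, maskedoff, operands, vl) untouched. A minimal sketch of what migration looks like in user code, assuming the standard `<riscv_vector.h>` header; the function `clamp_above` and its use case are hypothetical, not part of this patch:

#include <riscv_vector.h>

// Hypothetical user routine: cap each active element of `v` at `limit`
// (element-wise signed min against a scalar), using the tail-undisturbed,
// mask-undisturbed (_tumu) overloaded policy variant exercised above.
vint32m1_t clamp_above(vbool32_t mask, vint32m1_t maskedoff,
                       vint32m1_t v, int32_t limit, size_t vl) {
  // Before this patch: return vmin_tumu(mask, maskedoff, v, limit, vl);
  return __riscv_vmin_tumu(mask, maskedoff, v, limit, vl);
}

Overload resolution is unchanged: the same `__riscv_vmin_tumu` spelling dispatches to the `.vv` form when the last operand is a vector and to the `.vx` form when it is a scalar, exactly as the paired test functions in each hunk demonstrate.
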
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vminu.c
index 2d8571edc021..124ce4a93f0e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vminu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vminu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vminu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vminu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vminu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vminu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vminu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vminu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vminu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vminu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vminu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vminu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vminu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vminu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vminu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vminu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vminu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vminu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vminu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vminu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl);
}
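
For reference, this is what the rename amounts to at an ordinary call site. A minimal sketch assuming the standard <riscv_vector.h> header; clamp_masked is a hypothetical caller, not a function from this patch:

#include <riscv_vector.h>

// Clamp the active elements of v to limit. Under the _mu (mask-undisturbed)
// policy, inactive elements keep the value of the maskedoff operand, which
// is v itself here, so they pass through unchanged.
vuint8m1_t clamp_masked(vbool8_t mask, vuint8m1_t v, uint8_t limit, size_t vl) {
  return __riscv_vminu_mu(mask, v, v, limit, vl);  // previously spelled vminu_mu(...)
}

Overload resolution is unchanged by the prefix: the scalar limit still selects the vx form, exactly as in the test_vminu_vx_* cases above.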
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsbf.c
index 52c53e521dec..a487877d59b5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsbf.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbf_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) {
- return vmsbf_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsbf_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b2_mu(
@@ -21,7 +21,7 @@ vbool1_t test_vmsbf_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbf_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) {
- return vmsbf_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsbf_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b4_mu(
@@ -30,7 +30,7 @@ vbool2_t test_vmsbf_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) {
- return vmsbf_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsbf_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b8_mu(
@@ -39,7 +39,7 @@ vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbf_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) {
- return vmsbf_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsbf_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b16_mu(
@@ -48,7 +48,7 @@ vbool8_t test_vmsbf_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbf_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) {
- return vmsbf_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsbf_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b32_mu(
@@ -57,7 +57,7 @@ vbool16_t test_vmsbf_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbf_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) {
- return vmsbf_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsbf_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsbf_m_b64_mu(
@@ -66,6 +66,6 @@ vbool32_t test_vmsbf_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbf_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) {
- return vmsbf_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsbf_mu(mask, maskedoff, op1, vl);
}
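
The mask-register intrinsics get the same treatment. A minimal sketch, again assuming <riscv_vector.h>; before_first is a hypothetical helper, not part of this patch:

#include <riscv_vector.h>

// Set-before-first under a mask: result bits are 1 for active elements that
// precede the first active 1 in src; inactive bits keep prev's values.
vbool8_t before_first(vbool8_t mask, vbool8_t prev, vbool8_t src, size_t vl) {
  return __riscv_vmsbf_mu(mask, prev, src, vl);  // previously vmsbf_mu(...)
}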
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmseq.c
index 4fa8806a8437..57b1ff1ad9eb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmseq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmseq.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmseq_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmseq_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmseq_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_mu(
@@ -400,7 +400,7 @@ vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_mu(
@@ -409,7 +409,7 @@ vbool8_t test_vmseq_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_mu(
@@ -418,7 +418,7 @@ vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_mu(
@@ -427,7 +427,7 @@ vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_mu(
@@ -436,7 +436,7 @@ vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_mu(
@@ -445,7 +445,7 @@ vbool32_t test_vmseq_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_mu(
@@ -454,7 +454,7 @@ vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_mu(
@@ -463,7 +463,7 @@ vbool16_t test_vmseq_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_mu(
@@ -472,7 +472,7 @@ vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_mu(
@@ -481,7 +481,7 @@ vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_mu(
@@ -490,7 +490,7 @@ vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_mu(
@@ -499,7 +499,7 @@ vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_mu(
@@ -508,7 +508,7 @@ vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_mu(
@@ -517,7 +517,7 @@ vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_mu(
@@ -526,7 +526,7 @@ vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_mu(
@@ -535,7 +535,7 @@ vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_mu(
@@ -544,7 +544,7 @@ vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_mu(
@@ -553,7 +553,7 @@ vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_mu(
@@ -562,7 +562,7 @@ vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_mu(
@@ -571,7 +571,7 @@ vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_mu(
@@ -580,7 +580,7 @@ vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_mu(
@@ -589,7 +589,7 @@ vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_mu(
@@ -598,7 +598,7 @@ vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_mu(
@@ -607,7 +607,7 @@ vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_mu(
@@ -616,7 +616,7 @@ vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_mu(
@@ -625,7 +625,7 @@ vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_mu(
@@ -634,7 +634,7 @@ vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_mu(
@@ -643,7 +643,7 @@ vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_mu(
@@ -652,7 +652,7 @@ vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_mu(
@@ -661,7 +661,7 @@ vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_mu(
@@ -670,7 +670,7 @@ vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_mu(
@@ -679,7 +679,7 @@ vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_mu(
@@ -688,7 +688,7 @@ vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_mu(
@@ -697,7 +697,7 @@ vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_mu(
@@ -706,7 +706,7 @@ vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_mu(
@@ -715,7 +715,7 @@ vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_mu(
@@ -724,7 +724,7 @@ vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_mu(
@@ -733,7 +733,7 @@ vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_mu(
@@ -742,7 +742,7 @@ vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_mu(
@@ -751,7 +751,7 @@ vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_mu(
@@ -760,7 +760,7 @@ vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_mu(
@@ -769,7 +769,7 @@ vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_mu(
@@ -778,7 +778,7 @@ vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_mu(
@@ -787,7 +787,7 @@ vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_mu(
@@ -796,6 +796,6 @@ vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmseq_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmseq_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl);
}
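
Comparison intrinsics that produce mask results follow suit. A minimal sketch assuming <riscv_vector.h>; match_key is a hypothetical caller, not part of this patch:

#include <riscv_vector.h>

// Compare each active element of v against the scalar key; inactive result
// bits are taken from prev (the maskedoff operand).
vbool8_t match_key(vbool8_t mask, vbool8_t prev, vint8m1_t v, int8_t key, size_t vl) {
  return __riscv_vmseq_mu(mask, prev, v, key, vl);  // previously vmseq_mu(...)
}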
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsge.c
index d0b1d911b950..543df902572e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsge.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmsge_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmsge_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmsge_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8_mu(
@@ -400,6 +400,6 @@ vbool8_t test_vmsge_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsge_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsge_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgeu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgeu.c
index 96ff696c2d3f..572f4bdda018 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgeu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgeu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmsgeu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmsgeu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8_mu(
@@ -400,6 +400,6 @@ vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgeu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsgeu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgt.c
index 6f9c8e18c2e9..1cda0cc01259 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_mu(
@@ -400,6 +400,6 @@ vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgt_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsgt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgtu.c
index e40750fcb252..4f9d28a0c11a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgtu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsgtu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmsgtu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_mu(
@@ -400,6 +400,6 @@ vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsgtu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsif.c
index d0af32514575..6954b2c0e094 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsif.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsif_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) {
- return vmsif_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b2_mu(
@@ -21,7 +21,7 @@ vbool1_t test_vmsif_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsif_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) {
- return vmsif_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b4_mu(
@@ -30,7 +30,7 @@ vbool2_t test_vmsif_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) {
- return vmsif_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b8_mu(
@@ -39,7 +39,7 @@ vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsif_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) {
- return vmsif_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b16_mu(
@@ -48,7 +48,7 @@ vbool8_t test_vmsif_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsif_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) {
- return vmsif_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b32_mu(
@@ -57,7 +57,7 @@ vbool16_t test_vmsif_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsif_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) {
- return vmsif_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b64_mu(
@@ -66,6 +66,6 @@ vbool32_t test_vmsif_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsif_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) {
- return vmsif_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsle.c
index dff4b9db0bcc..eaa28544ff74 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsle.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmsle_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmsle_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmsle_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmsle_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmsle_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_mu(
@@ -400,6 +400,6 @@ vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsle_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsle_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsleu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsleu.c
index 7ad4ee36aa1d..d529b85548f4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsleu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsleu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmsleu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmsleu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmsleu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmsleu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_mu(
@@ -400,6 +400,6 @@ vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsleu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsleu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
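
The change above is purely mechanical: every overloaded call site keeps its argument list and only gains the `__riscv_` prefix. A minimal migration sketch for user code, built from the vmsleu tests above (the wrapper name `cmp_leu_u32m1` is hypothetical; the header, intrinsic name, and signature are exactly as in the tests):

#include <riscv_vector.h>

// Masked unsigned <= compare with the mask-undisturbed (_mu) policy:
// elements where `mask` is 0 keep their value from `maskedoff`.
vbool32_t cmp_leu_u32m1(vbool32_t mask, vbool32_t maskedoff,
                        vuint32m1_t op1, uint32_t op2, size_t vl) {
  // Before this patch the overloaded spelling was vmsleu_mu(...).
  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
}
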
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmslt.c
index 4ba2ca58cd55..98f8f5e90357 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmslt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmslt.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmslt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_mu(
@@ -400,6 +400,6 @@ vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmslt_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmslt_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
}
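
One point visible in the vmslt hunks: adding the prefix does not change how the overloading works. The overloaded spelling still dispatches on operand types, so a single name covers every element width and LMUL in the file. A small sketch of that, using two signatures taken verbatim from the tests above (the wrapper names are hypothetical):

#include <riscv_vector.h>

// Same overloaded name, resolved by argument types at each call site.
vbool64_t lt_i8mf8(vbool64_t m, vbool64_t md, vint8mf8_t a, vint8mf8_t b, size_t vl) {
  return __riscv_vmslt_mu(m, md, a, b, vl);  // i8 elements, LMUL=1/8 -> vbool64_t
}
vbool8_t lt_i64m8(vbool8_t m, vbool8_t md, vint64m8_t a, vint64m8_t b, size_t vl) {
  return __riscv_vmslt_mu(m, md, a, b, vl);  // i64 elements, LMUL=8 -> vbool8_t
}
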
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsltu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsltu.c
index f2a8e1f3c2bb..1a3a2b324633 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsltu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsltu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmsltu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmsltu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_mu(
@@ -400,6 +400,6 @@ vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsltu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsltu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsne.c
index c5ed17805cd3..a3cce9608a03 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsne.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_mu(
@@ -22,7 +22,7 @@ vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_mu(
@@ -31,7 +31,7 @@ vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_mu(
@@ -40,7 +40,7 @@ vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_mu(
@@ -49,7 +49,7 @@ vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_mu(
@@ -58,7 +58,7 @@ vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_mu(
@@ -67,7 +67,7 @@ vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_mu(
@@ -76,7 +76,7 @@ vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_mu(
@@ -85,7 +85,7 @@ vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_mu(
@@ -94,7 +94,7 @@ vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_mu(
@@ -103,7 +103,7 @@ vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_mu(
@@ -112,7 +112,7 @@ vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_mu(
@@ -121,7 +121,7 @@ vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_mu(
@@ -130,7 +130,7 @@ vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_mu(
@@ -139,7 +139,7 @@ vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_mu(
@@ -148,7 +148,7 @@ vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_mu(
@@ -157,7 +157,7 @@ vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_mu(
@@ -166,7 +166,7 @@ vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_mu(
@@ -175,7 +175,7 @@ vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_mu(
@@ -184,7 +184,7 @@ vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_mu(
@@ -193,7 +193,7 @@ vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_mu(
@@ -202,7 +202,7 @@ vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_mu(
@@ -211,7 +211,7 @@ vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_mu(
@@ -220,7 +220,7 @@ vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_mu(
@@ -229,7 +229,7 @@ vbool4_t test_vmsne_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_mu(
@@ -238,7 +238,7 @@ vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_mu(
@@ -247,7 +247,7 @@ vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_mu(
@@ -256,7 +256,7 @@ vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_mu(
@@ -265,7 +265,7 @@ vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_mu(
@@ -274,7 +274,7 @@ vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_mu(
@@ -283,7 +283,7 @@ vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_mu(
@@ -292,7 +292,7 @@ vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_mu(
@@ -301,7 +301,7 @@ vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_mu(
@@ -310,7 +310,7 @@ vbool8_t test_vmsne_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_mu(
@@ -319,7 +319,7 @@ vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_mu(
@@ -328,7 +328,7 @@ vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_mu(
@@ -337,7 +337,7 @@ vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_mu(
@@ -346,7 +346,7 @@ vbool64_t test_vmsne_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_mu(
@@ -355,7 +355,7 @@ vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_mu(
@@ -364,7 +364,7 @@ vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_mu(
@@ -373,7 +373,7 @@ vbool32_t test_vmsne_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_mu(
@@ -382,7 +382,7 @@ vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_mu(
@@ -391,7 +391,7 @@ vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_mu(
@@ -400,7 +400,7 @@ vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_mu(
@@ -409,7 +409,7 @@ vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_mu(
@@ -418,7 +418,7 @@ vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_mu(
@@ -427,7 +427,7 @@ vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_mu(
@@ -436,7 +436,7 @@ vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_mu(
@@ -445,7 +445,7 @@ vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_mu(
@@ -454,7 +454,7 @@ vbool16_t test_vmsne_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_mu(
@@ -463,7 +463,7 @@ vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_mu(
@@ -472,7 +472,7 @@ vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_mu(
@@ -481,7 +481,7 @@ vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_mu(
@@ -490,7 +490,7 @@ vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_mu(
@@ -499,7 +499,7 @@ vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_mu(
@@ -508,7 +508,7 @@ vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_mu(
@@ -517,7 +517,7 @@ vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_mu(
@@ -526,7 +526,7 @@ vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_mu(
@@ -535,7 +535,7 @@ vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_mu(
@@ -544,7 +544,7 @@ vbool64_t test_vmsne_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_mu(
@@ -553,7 +553,7 @@ vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_mu(
@@ -562,7 +562,7 @@ vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_mu(
@@ -571,7 +571,7 @@ vbool32_t test_vmsne_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_mu(
@@ -580,7 +580,7 @@ vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_mu(
@@ -589,7 +589,7 @@ vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_mu(
@@ -598,7 +598,7 @@ vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_mu(
@@ -607,7 +607,7 @@ vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_mu(
@@ -616,7 +616,7 @@ vbool4_t test_vmsne_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_mu(
@@ -625,7 +625,7 @@ vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_mu(
@@ -634,7 +634,7 @@ vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_mu(
@@ -643,7 +643,7 @@ vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_mu(
@@ -652,7 +652,7 @@ vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_mu(
@@ -661,7 +661,7 @@ vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_mu(
@@ -670,7 +670,7 @@ vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_mu(
@@ -679,7 +679,7 @@ vbool32_t test_vmsne_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_mu(
@@ -688,7 +688,7 @@ vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_mu(
@@ -697,7 +697,7 @@ vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_mu(
@@ -706,7 +706,7 @@ vbool8_t test_vmsne_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_mu(
@@ -715,7 +715,7 @@ vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_mu(
@@ -724,7 +724,7 @@ vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_mu(
@@ -733,7 +733,7 @@ vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_mu(
@@ -742,7 +742,7 @@ vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_mu(
@@ -751,7 +751,7 @@ vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_mu(
@@ -760,7 +760,7 @@ vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_mu(
@@ -769,7 +769,7 @@ vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_mu(
@@ -778,7 +778,7 @@ vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsne_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_mu(
@@ -787,7 +787,7 @@ vbool16_t test_vmsne_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_mu(
@@ -796,6 +796,6 @@ vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsne_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsne_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsof.c
index d4e96b549d6a..74843130e523 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmsof.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsof_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) {
- return vmsof_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsof_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b2_mu(
@@ -21,7 +21,7 @@ vbool1_t test_vmsof_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsof_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) {
- return vmsof_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsof_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b4_mu(
@@ -30,7 +30,7 @@ vbool2_t test_vmsof_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) {
- return vmsof_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsof_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b8_mu(
@@ -39,7 +39,7 @@ vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsof_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) {
- return vmsof_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsof_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b16_mu(
@@ -48,7 +48,7 @@ vbool8_t test_vmsof_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsof_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) {
- return vmsof_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsof_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b32_mu(
@@ -57,7 +57,7 @@ vbool16_t test_vmsof_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsof_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) {
- return vmsof_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsof_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsof_m_b64_mu(
@@ -66,6 +66,6 @@ vbool32_t test_vmsof_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsof_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) {
- return vmsof_mu(mask, maskedoff, op1, vl);
+ return __riscv_vmsof_mu(mask, maskedoff, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmul.c
index 63c3bf028c1a..8363855a63fc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmul.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_tu(
@@ -408,7 +408,7 @@ vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_tu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_tu(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_tu(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_tu(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_tu(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_tu(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_tu(
@@ -471,7 +471,7 @@ vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_tu(
@@ -480,7 +480,7 @@ vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_tu(
@@ -489,7 +489,7 @@ vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_tu(
@@ -498,7 +498,7 @@ vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_tu(
@@ -507,7 +507,7 @@ vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_tu(
@@ -516,7 +516,7 @@ vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_tu(
@@ -525,7 +525,7 @@ vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_tu(
@@ -534,7 +534,7 @@ vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_tu(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_tu(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_tu(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_tu(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_tu(
@@ -579,7 +579,7 @@ vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_tu(
@@ -588,7 +588,7 @@ vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_tu(
@@ -597,7 +597,7 @@ vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_tu(
@@ -606,7 +606,7 @@ vuint16m2_t test_vmul_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_tu(
@@ -615,7 +615,7 @@ vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_tu(
@@ -624,7 +624,7 @@ vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_tu(
@@ -633,7 +633,7 @@ vuint16m8_t test_vmul_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tu(
@@ -642,7 +642,7 @@ vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tu(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_tu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_tu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_tu(
@@ -678,7 +678,7 @@ vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_tu(
@@ -687,7 +687,7 @@ vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_tu(
@@ -696,7 +696,7 @@ vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_tu(
@@ -705,7 +705,7 @@ vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_tu(
@@ -714,7 +714,7 @@ vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_tu(
@@ -723,7 +723,7 @@ vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_tu(
@@ -732,7 +732,7 @@ vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_tu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_tu(
@@ -750,7 +750,7 @@ vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_tu(
@@ -759,7 +759,7 @@ vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_tu(
@@ -768,7 +768,7 @@ vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_tu(
@@ -777,7 +777,7 @@ vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_tu(
@@ -786,7 +786,7 @@ vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_tu(
@@ -795,7 +795,7 @@ vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_tum(
@@ -804,7 +804,7 @@ vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_tum(
@@ -813,7 +813,7 @@ vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_tum(
@@ -822,7 +822,7 @@ vint8mf8_t test_vmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_tum(
@@ -831,7 +831,7 @@ vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_tum(
@@ -840,7 +840,7 @@ vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_tum(
@@ -849,7 +849,7 @@ vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_tum(
@@ -858,7 +858,7 @@ vint8mf2_t test_vmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_tum(
@@ -867,7 +867,7 @@ vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_tum(
@@ -876,7 +876,7 @@ vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_tum(
@@ -885,7 +885,7 @@ vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_tum(
@@ -894,7 +894,7 @@ vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_tum(
@@ -903,7 +903,7 @@ vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_tum(
@@ -912,7 +912,7 @@ vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_tum(
@@ -921,7 +921,7 @@ vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_tum(
@@ -930,7 +930,7 @@ vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_tum(
@@ -939,7 +939,7 @@ vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_tum(
@@ -948,7 +948,7 @@ vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_tum(
@@ -957,7 +957,7 @@ vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_tum(
@@ -966,7 +966,7 @@ vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_tum(
@@ -975,7 +975,7 @@ vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_tum(
@@ -984,7 +984,7 @@ vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_tum(
@@ -993,7 +993,7 @@ vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_tum(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_tum(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_tum(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_tum(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tum(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tum(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_tum(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_tum(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_tum(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_tum(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_tum(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_tum(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_tum(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_tum(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_tum(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_tum(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_tum(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_tum(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_tum(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_tum(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_tum(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_tum(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_tum(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_tum(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_tum(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_tum(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_tum(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_tum(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_tum(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_tum(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_tum(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_tum(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_tum(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_tum(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_tum(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_tum(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_tum(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_tum(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_tum(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_tum(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_tum(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_tum(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_tum(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_tum(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_tum(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_tum(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_tum(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_tum(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tum(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tum(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_tum(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_tum(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_tum(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_tum(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_tum(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_tum(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_tum(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_tum(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_tum(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_tum(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_tum(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_tum(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_tum(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_tum(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_tum(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_tum(
@@ -1587,7 +1587,7 @@ vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_tumu(
@@ -1596,7 +1596,7 @@ vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_tumu(
@@ -1605,7 +1605,7 @@ vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_tumu(
@@ -1614,7 +1614,7 @@ vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_tumu(
@@ -1623,7 +1623,7 @@ vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_tumu(
@@ -1632,7 +1632,7 @@ vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_tumu(
@@ -1641,7 +1641,7 @@ vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_tumu(
@@ -1650,7 +1650,7 @@ vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_tumu(
@@ -1659,7 +1659,7 @@ vint8m1_t test_vmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_tumu(
@@ -1668,7 +1668,7 @@ vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_tumu(
@@ -1677,7 +1677,7 @@ vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_tumu(
@@ -1686,7 +1686,7 @@ vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_tumu(
@@ -1695,7 +1695,7 @@ vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_tumu(
@@ -1704,7 +1704,7 @@ vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_tumu(
@@ -1713,7 +1713,7 @@ vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_tumu(
@@ -1722,7 +1722,7 @@ vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_tumu(
@@ -1731,7 +1731,7 @@ vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_tumu(
@@ -1740,7 +1740,7 @@ vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_tumu(
@@ -1749,7 +1749,7 @@ vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_tumu(
@@ -1758,7 +1758,7 @@ vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_tumu(
@@ -1767,7 +1767,7 @@ vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_tumu(
@@ -1776,7 +1776,7 @@ vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_tumu(
@@ -1785,7 +1785,7 @@ vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_tumu(
@@ -1794,7 +1794,7 @@ vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_tumu(
@@ -1803,7 +1803,7 @@ vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_tumu(
@@ -1812,7 +1812,7 @@ vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_tumu(
@@ -1821,7 +1821,7 @@ vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tumu(
@@ -1830,7 +1830,7 @@ vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tumu(
@@ -1839,7 +1839,7 @@ vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_tumu(
@@ -1848,7 +1848,7 @@ vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_tumu(
@@ -1857,7 +1857,7 @@ vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_tumu(
@@ -1866,7 +1866,7 @@ vint32m1_t test_vmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_tumu(
@@ -1875,7 +1875,7 @@ vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_tumu(
@@ -1884,7 +1884,7 @@ vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_tumu(
@@ -1893,7 +1893,7 @@ vint32m4_t test_vmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_tumu(
@@ -1902,7 +1902,7 @@ vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_tumu(
@@ -1911,7 +1911,7 @@ vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_tumu(
@@ -1920,7 +1920,7 @@ vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_tumu(
@@ -1929,7 +1929,7 @@ vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_tumu(
@@ -1938,7 +1938,7 @@ vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_tumu(
@@ -1947,7 +1947,7 @@ vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_tumu(
@@ -1956,7 +1956,7 @@ vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_tumu(
@@ -1965,7 +1965,7 @@ vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_tumu(
@@ -1974,7 +1974,7 @@ vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_tumu(
@@ -1983,7 +1983,7 @@ vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_tumu(
@@ -1992,7 +1992,7 @@ vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_tumu(
@@ -2001,7 +2001,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_tumu(
@@ -2010,7 +2010,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_tumu(
@@ -2019,7 +2019,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_tumu(
@@ -2028,7 +2028,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_tumu(
@@ -2037,7 +2037,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_tumu(
@@ -2046,7 +2046,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_tumu(
@@ -2055,7 +2055,7 @@ vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_tumu(
@@ -2064,7 +2064,7 @@ vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_tumu(
@@ -2073,7 +2073,7 @@ vuint8m2_t test_vmul_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_tumu(
@@ -2082,7 +2082,7 @@ vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_tumu(
@@ -2091,7 +2091,7 @@ vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_tumu(
@@ -2100,7 +2100,7 @@ vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_tumu(
@@ -2109,7 +2109,7 @@ vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_tumu(
@@ -2118,7 +2118,7 @@ vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_tumu(
@@ -2127,7 +2127,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_tumu(
@@ -2136,7 +2136,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_tumu(
@@ -2145,7 +2145,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_tumu(
@@ -2154,7 +2154,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_tumu(
@@ -2163,7 +2163,7 @@ vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_tumu(
@@ -2172,7 +2172,7 @@ vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_tumu(
@@ -2181,7 +2181,7 @@ vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_tumu(
@@ -2190,7 +2190,7 @@ vuint16m2_t test_vmul_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_tumu(
@@ -2199,7 +2199,7 @@ vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_tumu(
@@ -2208,7 +2208,7 @@ vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_tumu(
@@ -2217,7 +2217,7 @@ vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tumu(
@@ -2226,7 +2226,7 @@ vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tumu(
@@ -2235,7 +2235,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_tumu(
@@ -2244,7 +2244,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_tumu(
@@ -2253,7 +2253,7 @@ vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_tumu(
@@ -2262,7 +2262,7 @@ vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_tumu(
@@ -2271,7 +2271,7 @@ vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_tumu(
@@ -2280,7 +2280,7 @@ vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_tumu(
@@ -2289,7 +2289,7 @@ vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_tumu(
@@ -2298,7 +2298,7 @@ vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_tumu(
@@ -2307,7 +2307,7 @@ vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_tumu(
@@ -2316,7 +2316,7 @@ vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_tumu(
@@ -2325,7 +2325,7 @@ vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_tumu(
@@ -2334,7 +2334,7 @@ vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_tumu(
@@ -2343,7 +2343,7 @@ vuint64m2_t test_vmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_tumu(
@@ -2352,7 +2352,7 @@ vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_tumu(
@@ -2361,7 +2361,7 @@ vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_tumu(
@@ -2370,7 +2370,7 @@ vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_tumu(
@@ -2379,7 +2379,7 @@ vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_mu(
@@ -2388,7 +2388,7 @@ vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_mu(
@@ -2397,7 +2397,7 @@ vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_mu(
@@ -2406,7 +2406,7 @@ vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_mu(
@@ -2415,7 +2415,7 @@ vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_mu(
@@ -2424,7 +2424,7 @@ vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_mu(
@@ -2433,7 +2433,7 @@ vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_mu(
@@ -2442,7 +2442,7 @@ vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_mu(
@@ -2451,7 +2451,7 @@ vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_mu(
@@ -2460,7 +2460,7 @@ vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_mu(
@@ -2469,7 +2469,7 @@ vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_mu(
@@ -2478,7 +2478,7 @@ vint8m2_t test_vmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_mu(
@@ -2487,7 +2487,7 @@ vint8m4_t test_vmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_mu(
@@ -2496,7 +2496,7 @@ vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_mu(
@@ -2505,7 +2505,7 @@ vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_mu(
@@ -2514,7 +2514,7 @@ vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_mu(
@@ -2523,7 +2523,7 @@ vint16mf4_t test_vmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_mu(
@@ -2532,7 +2532,7 @@ vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_mu(
@@ -2541,7 +2541,7 @@ vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_mu(
@@ -2550,7 +2550,7 @@ vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_mu(
@@ -2559,7 +2559,7 @@ vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_mu(
@@ -2568,7 +2568,7 @@ vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_mu(
@@ -2577,7 +2577,7 @@ vint16m2_t test_vmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_mu(
@@ -2586,7 +2586,7 @@ vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_mu(
@@ -2595,7 +2595,7 @@ vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_mu(
@@ -2604,7 +2604,7 @@ vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_mu(
@@ -2613,7 +2613,7 @@ vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_mu(
@@ -2622,7 +2622,7 @@ vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_mu(
@@ -2631,7 +2631,7 @@ vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_mu(
@@ -2640,7 +2640,7 @@ vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_mu(
@@ -2649,7 +2649,7 @@ vint32m1_t test_vmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_mu(
@@ -2658,7 +2658,7 @@ vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_mu(
@@ -2667,7 +2667,7 @@ vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_mu(
@@ -2676,7 +2676,7 @@ vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_mu(
@@ -2685,7 +2685,7 @@ vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_mu(
@@ -2694,7 +2694,7 @@ vint32m4_t test_vmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_mu(
@@ -2703,7 +2703,7 @@ vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_mu(
@@ -2712,7 +2712,7 @@ vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_mu(
@@ -2721,7 +2721,7 @@ vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_mu(
@@ -2730,7 +2730,7 @@ vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_mu(
@@ -2739,7 +2739,7 @@ vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_mu(
@@ -2748,7 +2748,7 @@ vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_mu(
@@ -2757,7 +2757,7 @@ vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_mu(
@@ -2766,7 +2766,7 @@ vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_mu(
@@ -2775,7 +2775,7 @@ vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_mu(
@@ -2784,7 +2784,7 @@ vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_mu(
@@ -2793,7 +2793,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_mu(
@@ -2802,7 +2802,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_mu(
@@ -2811,7 +2811,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_mu(
@@ -2820,7 +2820,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_mu(
@@ -2829,7 +2829,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_mu(
@@ -2838,7 +2838,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_mu(
@@ -2847,7 +2847,7 @@ vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_mu(
@@ -2856,7 +2856,7 @@ vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_mu(
@@ -2865,7 +2865,7 @@ vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_mu(
@@ -2874,7 +2874,7 @@ vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_mu(
@@ -2883,7 +2883,7 @@ vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_mu(
@@ -2892,7 +2892,7 @@ vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_mu(
@@ -2901,7 +2901,7 @@ vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_mu(
@@ -2910,7 +2910,7 @@ vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_mu(
@@ -2919,7 +2919,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_mu(
@@ -2928,7 +2928,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_mu(
@@ -2937,7 +2937,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_mu(
@@ -2946,7 +2946,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_mu(
@@ -2955,7 +2955,7 @@ vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_mu(
@@ -2964,7 +2964,7 @@ vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_mu(
@@ -2973,7 +2973,7 @@ vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_mu(
@@ -2982,7 +2982,7 @@ vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_mu(
@@ -2991,7 +2991,7 @@ vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_mu(
@@ -3000,7 +3000,7 @@ vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_mu(
@@ -3009,7 +3009,7 @@ vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_mu(
@@ -3018,7 +3018,7 @@ vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_mu(
@@ -3027,7 +3027,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_mu(
@@ -3036,7 +3036,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_mu(
@@ -3045,7 +3045,7 @@ vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_mu(
@@ -3054,7 +3054,7 @@ vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_mu(
@@ -3063,7 +3063,7 @@ vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_mu(
@@ -3072,7 +3072,7 @@ vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_mu(
@@ -3081,7 +3081,7 @@ vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_mu(
@@ -3090,7 +3090,7 @@ vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_mu(
@@ -3099,7 +3099,7 @@ vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_mu(
@@ -3108,7 +3108,7 @@ vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_mu(
@@ -3117,7 +3117,7 @@ vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_mu(
@@ -3126,7 +3126,7 @@ vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_mu(
@@ -3135,7 +3135,7 @@ vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_mu(
@@ -3144,7 +3144,7 @@ vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_mu(
@@ -3153,7 +3153,7 @@ vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_mu(
@@ -3162,7 +3162,7 @@ vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_mu(
@@ -3171,6 +3171,6 @@ vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulh.c
index 586b338d13c3..8484f9239dc1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulh.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulh.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vmulh_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vmulh_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vmulh_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vmulh_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmulh_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vmulh_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vmulh_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vmulh_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vmulh_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vmulh_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmulh_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vmulh_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vmulh_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vmulh_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmulh_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vmulh_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vmulh_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vmulh_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmulh_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
}
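For context between the two test files, here is a minimal sketch of how this rename lands in user code, assuming a toolchain whose <riscv_vector.h> already exposes the prefixed overloads; the wrapper function name below is illustrative and not part of the patch, while both intrinsic spellings are taken verbatim from the hunks above.

#include <riscv_vector.h>

// Overloaded policy intrinsic after this patch: the tumu variant
// (tail-undisturbed, mask-undisturbed) of vmulh now carries the
// __riscv_ prefix required by riscv-c-api-doc.
vint32m1_t mulh_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff,
                           vint32m1_t op1, vint32m1_t op2, size_t vl) {
  // Pre-patch spelling: vmulh_tumu(mask, maskedoff, op1, op2, vl);
  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
}

The overload still resolves on the argument types alone (vint32m1_t operands select the i32m1 form), so migrating callers only prepend the prefix; no argument changes are needed.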
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhsu.c
index 6ad28b86d46c..fbe603523ca7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhsu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhsu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
}
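// --- Illustrative usage note (not part of the autogenerated diff) ---
// A minimal sketch of how user code migrates under this rename: the overloaded
// policy intrinsics keep their argument lists unchanged, and only the `__riscv_`
// prefix is new. Assumes <riscv_vector.h> and the V extension are available; the
// wrapper name `mulh_su_masked_tu` below is hypothetical, for illustration only.
#include <riscv_vector.h>

vint32m1_t mulh_su_masked_tu(vbool32_t mask, vint32m1_t maskedoff,
                             vint32m1_t op1, vuint32m1_t op2, size_t vl) {
  // Before this patch: return vmulhsu_tum(mask, maskedoff, op1, op2, vl);
  // After this patch, the same overloaded tail-undisturbed masked call is
  // spelled with the prefix:
  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
}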
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhu.c
index a15bb7d63c22..8f8770098057 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmulhu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}
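// Illustrative usage sketch (not part of this patch): with the __riscv_
// prefix, a caller of the overloaded masked-undisturbed multiply-high
// intrinsic migrates as below. The helper name mulhi_masked is hypothetical;
// the intrinsic signature matches the tests above.
#include <riscv_vector.h>

vuint32m1_t mulhi_masked(vbool32_t mask, vuint32m1_t maskedoff,
                         vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  // Previously: return vmulhu_mu(mask, maskedoff, op1, op2, vl);
  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
}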
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c
index bf1539ceb721..2ba02b4d532f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmv_v_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8_tu(
@@ -22,7 +22,7 @@ vint8mf8_t test_vmv_v_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmv_v_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4_tu(
@@ -31,7 +31,7 @@ vint8mf8_t test_vmv_v_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmv_v_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4_tu(
@@ -40,7 +40,7 @@ vint8mf4_t test_vmv_v_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmv_v_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2_tu(
@@ -49,7 +49,7 @@ vint8mf4_t test_vmv_v_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmv_v_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2_tu(
@@ -58,7 +58,7 @@ vint8mf2_t test_vmv_v_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmv_v_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m1_tu(
@@ -67,7 +67,7 @@ vint8mf2_t test_vmv_v_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmv_v_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i8m1_tu(
@@ -76,7 +76,7 @@ vint8m1_t test_vmv_v_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmv_v_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m2_tu(
@@ -85,7 +85,7 @@ vint8m1_t test_vmv_v_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmv_v_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i8m2_tu(
@@ -94,7 +94,7 @@ vint8m2_t test_vmv_v_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmv_v_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m4_tu(
@@ -103,7 +103,7 @@ vint8m2_t test_vmv_v_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmv_v_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i8m4_tu(
@@ -112,7 +112,7 @@ vint8m4_t test_vmv_v_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmv_v_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m8_tu(
@@ -121,7 +121,7 @@ vint8m4_t test_vmv_v_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmv_v_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i8m8_tu(
@@ -130,7 +130,7 @@ vint8m8_t test_vmv_v_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmv_v_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4_tu(
@@ -139,7 +139,7 @@ vint8m8_t test_vmv_v_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmv_v_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4_tu(
@@ -148,7 +148,7 @@ vint16mf4_t test_vmv_v_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmv_v_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2_tu(
@@ -157,7 +157,7 @@ vint16mf4_t test_vmv_v_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmv_v_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf2_tu(
@@ -166,7 +166,7 @@ vint16mf2_t test_vmv_v_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmv_v_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m1_tu(
@@ -175,7 +175,7 @@ vint16mf2_t test_vmv_v_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmv_v_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i16m1_tu(
@@ -184,7 +184,7 @@ vint16m1_t test_vmv_v_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmv_v_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m2_tu(
@@ -193,7 +193,7 @@ vint16m1_t test_vmv_v_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmv_v_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i16m2_tu(
@@ -202,7 +202,7 @@ vint16m2_t test_vmv_v_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmv_v_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m4_tu(
@@ -211,7 +211,7 @@ vint16m2_t test_vmv_v_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmv_v_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i16m4_tu(
@@ -220,7 +220,7 @@ vint16m4_t test_vmv_v_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmv_v_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m8_tu(
@@ -229,7 +229,7 @@ vint16m4_t test_vmv_v_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmv_v_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i16m8_tu(
@@ -238,7 +238,7 @@ vint16m8_t test_vmv_v_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmv_v_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_tu(
@@ -247,7 +247,7 @@ vint16m8_t test_vmv_v_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmv_v_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2_tu(
@@ -256,7 +256,7 @@ vint32mf2_t test_vmv_v_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmv_v_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m1_tu(
@@ -265,7 +265,7 @@ vint32mf2_t test_vmv_v_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmv_v_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i32m1_tu(
@@ -274,7 +274,7 @@ vint32m1_t test_vmv_v_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmv_v_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vmv_v_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmv_v_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i32m2_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vmv_v_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmv_v_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m4_tu(
@@ -301,7 +301,7 @@ vint32m2_t test_vmv_v_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmv_v_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i32m4_tu(
@@ -310,7 +310,7 @@ vint32m4_t test_vmv_v_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmv_v_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m8_tu(
@@ -319,7 +319,7 @@ vint32m4_t test_vmv_v_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmv_v_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i32m8_tu(
@@ -328,7 +328,7 @@ vint32m8_t test_vmv_v_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmv_v_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m1_tu(
@@ -337,7 +337,7 @@ vint32m8_t test_vmv_v_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmv_v_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i64m1_tu(
@@ -346,7 +346,7 @@ vint64m1_t test_vmv_v_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmv_v_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m2_tu(
@@ -355,7 +355,7 @@ vint64m1_t test_vmv_v_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmv_v_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i64m2_tu(
@@ -364,7 +364,7 @@ vint64m2_t test_vmv_v_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmv_v_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m4_tu(
@@ -373,7 +373,7 @@ vint64m2_t test_vmv_v_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmv_v_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i64m4_tu(
@@ -382,7 +382,7 @@ vint64m4_t test_vmv_v_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmv_v_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m8_tu(
@@ -391,7 +391,7 @@ vint64m4_t test_vmv_v_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmv_v_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_i64m8_tu(
@@ -400,7 +400,7 @@ vint64m8_t test_vmv_v_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmv_v_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8_tu(
@@ -409,7 +409,7 @@ vint64m8_t test_vmv_v_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmv_v_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8_tu(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vmv_v_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmv_v_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4_tu(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vmv_v_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmv_v_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf4_tu(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vmv_v_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmv_v_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2_tu(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vmv_v_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmv_v_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2_tu(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vmv_v_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmv_v_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m1_tu(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vmv_v_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmv_v_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u8m1_tu(
@@ -472,7 +472,7 @@ vuint8m1_t test_vmv_v_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmv_v_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m2_tu(
@@ -481,7 +481,7 @@ vuint8m1_t test_vmv_v_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmv_v_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u8m2_tu(
@@ -490,7 +490,7 @@ vuint8m2_t test_vmv_v_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmv_v_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m4_tu(
@@ -499,7 +499,7 @@ vuint8m2_t test_vmv_v_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmv_v_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u8m4_tu(
@@ -508,7 +508,7 @@ vuint8m4_t test_vmv_v_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmv_v_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m8_tu(
@@ -517,7 +517,7 @@ vuint8m4_t test_vmv_v_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmv_v_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u8m8_tu(
@@ -526,7 +526,7 @@ vuint8m8_t test_vmv_v_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmv_v_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4_tu(
@@ -535,7 +535,7 @@ vuint8m8_t test_vmv_v_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4_tu(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmv_v_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2_tu(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vmv_v_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2_tu(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmv_v_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m1_tu(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vmv_v_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmv_v_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u16m1_tu(
@@ -580,7 +580,7 @@ vuint16m1_t test_vmv_v_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmv_v_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m2_tu(
@@ -589,7 +589,7 @@ vuint16m1_t test_vmv_v_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmv_v_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u16m2_tu(
@@ -598,7 +598,7 @@ vuint16m2_t test_vmv_v_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmv_v_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m4_tu(
@@ -607,7 +607,7 @@ vuint16m2_t test_vmv_v_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmv_v_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u16m4_tu(
@@ -616,7 +616,7 @@ vuint16m4_t test_vmv_v_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmv_v_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m8_tu(
@@ -625,7 +625,7 @@ vuint16m4_t test_vmv_v_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmv_v_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u16m8_tu(
@@ -634,7 +634,7 @@ vuint16m8_t test_vmv_v_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmv_v_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_tu(
@@ -643,7 +643,7 @@ vuint16m8_t test_vmv_v_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2_tu(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m1_tu(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmv_v_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u32m1_tu(
@@ -670,7 +670,7 @@ vuint32m1_t test_vmv_v_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmv_v_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m2_tu(
@@ -679,7 +679,7 @@ vuint32m1_t test_vmv_v_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmv_v_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u32m2_tu(
@@ -688,7 +688,7 @@ vuint32m2_t test_vmv_v_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmv_v_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m4_tu(
@@ -697,7 +697,7 @@ vuint32m2_t test_vmv_v_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmv_v_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u32m4_tu(
@@ -706,7 +706,7 @@ vuint32m4_t test_vmv_v_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmv_v_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m8_tu(
@@ -715,7 +715,7 @@ vuint32m4_t test_vmv_v_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmv_v_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u32m8_tu(
@@ -724,7 +724,7 @@ vuint32m8_t test_vmv_v_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmv_v_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m1_tu(
@@ -733,7 +733,7 @@ vuint32m8_t test_vmv_v_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmv_v_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u64m1_tu(
@@ -742,7 +742,7 @@ vuint64m1_t test_vmv_v_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmv_v_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m2_tu(
@@ -751,7 +751,7 @@ vuint64m1_t test_vmv_v_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmv_v_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u64m2_tu(
@@ -760,7 +760,7 @@ vuint64m2_t test_vmv_v_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmv_v_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m4_tu(
@@ -769,7 +769,7 @@ vuint64m2_t test_vmv_v_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmv_v_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u64m4_tu(
@@ -778,7 +778,7 @@ vuint64m4_t test_vmv_v_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmv_v_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m8_tu(
@@ -787,7 +787,7 @@ vuint64m4_t test_vmv_v_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmv_v_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_x_u64m8_tu(
@@ -796,7 +796,7 @@ vuint64m8_t test_vmv_v_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmv_v_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4_tu(
@@ -805,7 +805,7 @@ vuint64m8_t test_vmv_v_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2_tu(
@@ -814,7 +814,7 @@ vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1_tu(
@@ -823,7 +823,7 @@ vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2_tu(
@@ -832,7 +832,7 @@ vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4_tu(
@@ -841,7 +841,7 @@ vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8_tu(
@@ -850,7 +850,7 @@ vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_tu(
@@ -859,7 +859,7 @@ vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m1_tu(
@@ -868,7 +868,7 @@ vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m2_tu(
@@ -877,7 +877,7 @@ vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m4_tu(
@@ -886,7 +886,7 @@ vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m8_tu(
@@ -895,7 +895,7 @@ vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m1_tu(
@@ -904,7 +904,7 @@ vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m2_tu(
@@ -913,7 +913,7 @@ vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m4_tu(
@@ -922,7 +922,7 @@ vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m8_tu(
@@ -931,7 +931,7 @@ vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
- return vmv_v_tu(maskedoff, src, vl);
+ return __riscv_vmv_v_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8_tu(
@@ -940,7 +940,7 @@ vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmv_s_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4_tu(
@@ -949,7 +949,7 @@ vint8mf8_t test_vmv_s_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmv_s_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2_tu(
@@ -958,7 +958,7 @@ vint8mf4_t test_vmv_s_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmv_s_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m1_tu(
@@ -967,7 +967,7 @@ vint8mf2_t test_vmv_s_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmv_s_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m2_tu(
@@ -976,7 +976,7 @@ vint8m1_t test_vmv_s_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmv_s_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m4_tu(
@@ -985,7 +985,7 @@ vint8m2_t test_vmv_s_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmv_s_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m8_tu(
@@ -994,7 +994,7 @@ vint8m4_t test_vmv_s_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmv_s_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4_tu(
@@ -1003,7 +1003,7 @@ vint8m8_t test_vmv_s_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmv_s_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2_tu(
@@ -1012,7 +1012,7 @@ vint16mf4_t test_vmv_s_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmv_s_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m1_tu(
@@ -1021,7 +1021,7 @@ vint16mf2_t test_vmv_s_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmv_s_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m2_tu(
@@ -1030,7 +1030,7 @@ vint16m1_t test_vmv_s_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmv_s_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m4_tu(
@@ -1039,7 +1039,7 @@ vint16m2_t test_vmv_s_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmv_s_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m8_tu(
@@ -1048,7 +1048,7 @@ vint16m4_t test_vmv_s_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmv_s_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2_tu(
@@ -1057,7 +1057,7 @@ vint16m8_t test_vmv_s_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmv_s_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m1_tu(
@@ -1066,7 +1066,7 @@ vint32mf2_t test_vmv_s_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmv_s_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m2_tu(
@@ -1075,7 +1075,7 @@ vint32m1_t test_vmv_s_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmv_s_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m4_tu(
@@ -1084,7 +1084,7 @@ vint32m2_t test_vmv_s_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmv_s_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m8_tu(
@@ -1093,7 +1093,7 @@ vint32m4_t test_vmv_s_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmv_s_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i64m1_tu(
@@ -1102,7 +1102,7 @@ vint32m8_t test_vmv_s_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmv_s_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i64m2_tu(
@@ -1111,7 +1111,7 @@ vint64m1_t test_vmv_s_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmv_s_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i64m4_tu(
@@ -1120,7 +1120,7 @@ vint64m2_t test_vmv_s_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmv_s_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_i64m8_tu(
@@ -1129,7 +1129,7 @@ vint64m4_t test_vmv_s_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmv_s_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8_tu(
@@ -1138,7 +1138,7 @@ vint64m8_t test_vmv_s_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmv_s_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4_tu(
@@ -1147,7 +1147,7 @@ vuint8mf8_t test_vmv_s_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmv_s_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2_tu(
@@ -1156,7 +1156,7 @@ vuint8mf4_t test_vmv_s_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmv_s_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u8m1_tu(
@@ -1165,7 +1165,7 @@ vuint8mf2_t test_vmv_s_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmv_s_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u8m2_tu(
@@ -1174,7 +1174,7 @@ vuint8m1_t test_vmv_s_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmv_s_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u8m4_tu(
@@ -1183,7 +1183,7 @@ vuint8m2_t test_vmv_s_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmv_s_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u8m8_tu(
@@ -1192,7 +1192,7 @@ vuint8m4_t test_vmv_s_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmv_s_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4_tu(
@@ -1201,7 +1201,7 @@ vuint8m8_t test_vmv_s_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmv_s_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2_tu(
@@ -1210,7 +1210,7 @@ vuint16mf4_t test_vmv_s_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmv_s_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u16m1_tu(
@@ -1219,7 +1219,7 @@ vuint16mf2_t test_vmv_s_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmv_s_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u16m2_tu(
@@ -1228,7 +1228,7 @@ vuint16m1_t test_vmv_s_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmv_s_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u16m4_tu(
@@ -1237,7 +1237,7 @@ vuint16m2_t test_vmv_s_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmv_s_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u16m8_tu(
@@ -1246,7 +1246,7 @@ vuint16m4_t test_vmv_s_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmv_s_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2_tu(
@@ -1255,7 +1255,7 @@ vuint16m8_t test_vmv_s_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmv_s_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u32m1_tu(
@@ -1264,7 +1264,7 @@ vuint32mf2_t test_vmv_s_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmv_s_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u32m2_tu(
@@ -1273,7 +1273,7 @@ vuint32m1_t test_vmv_s_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmv_s_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u32m4_tu(
@@ -1282,7 +1282,7 @@ vuint32m2_t test_vmv_s_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmv_s_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u32m8_tu(
@@ -1291,7 +1291,7 @@ vuint32m4_t test_vmv_s_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmv_s_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u64m1_tu(
@@ -1300,7 +1300,7 @@ vuint32m8_t test_vmv_s_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmv_s_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u64m2_tu(
@@ -1309,7 +1309,7 @@ vuint64m1_t test_vmv_s_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmv_s_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u64m4_tu(
@@ -1318,7 +1318,7 @@ vuint64m2_t test_vmv_s_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmv_s_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vmv_s_x_u64m8_tu(
@@ -1327,6 +1327,6 @@ vuint64m4_t test_vmv_s_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl) {
- return vmv_s_tu(maskedoff, src, vl);
+ return __riscv_vmv_s_tu(maskedoff, src, vl);
}
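
For reference, a minimal sketch (illustrative, not part of the patch; the wrapper name keep_tail_set_first is invented here) of how calling code spells the renamed overloaded intrinsic after this change; the argument types mirror the tests above:

#include <riscv_vector.h>

/* Tail-undisturbed scalar move: writes `src` into element 0 and, under the
 * `_tu` policy, keeps the remaining elements from `maskedoff` intact.
 * Before this patch the overloaded call was spelled vmv_s_tu(...). */
vuint32m1_t keep_tail_set_first(vuint32m1_t maskedoff, uint32_t src, size_t vl) {
  return __riscv_vmv_s_tu(maskedoff, src, vl);
}
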
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclip.c
index 99de3c6782c4..4f4d07f6f7fa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclip.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_tu(
@@ -129,7 +129,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_tu(
@@ -138,7 +138,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_tu(
@@ -147,7 +147,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_tu(
@@ -156,7 +156,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_tu(
@@ -165,7 +165,7 @@ vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_tu(
@@ -174,7 +174,7 @@ vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_tu(
@@ -183,7 +183,7 @@ vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_tu(
@@ -192,7 +192,7 @@ vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_tu(
@@ -201,7 +201,7 @@ vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tu(
@@ -210,7 +210,7 @@ vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tu(
@@ -219,7 +219,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_tu(
@@ -228,7 +228,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_tu(
@@ -237,7 +237,7 @@ vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_tu(
@@ -246,7 +246,7 @@ vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_tu(
@@ -255,7 +255,7 @@ vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_tu(
@@ -264,7 +264,7 @@ vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_tu(
@@ -273,7 +273,7 @@ vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_tum(
@@ -282,7 +282,7 @@ vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_tum(
@@ -291,7 +291,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_tum(
@@ -300,7 +300,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_tum(
@@ -309,7 +309,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_tum(
@@ -318,7 +318,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_tum(
@@ -327,7 +327,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_tum(
@@ -336,7 +336,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_tum(
@@ -345,7 +345,7 @@ vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_tum(
@@ -354,7 +354,7 @@ vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_tum(
@@ -363,7 +363,7 @@ vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_tum(
@@ -372,7 +372,7 @@ vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_tum(
@@ -381,7 +381,7 @@ vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_tum(
@@ -390,7 +390,7 @@ vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_tum(
@@ -399,7 +399,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_tum(
@@ -408,7 +408,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_tum(
@@ -417,7 +417,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_tum(
@@ -426,7 +426,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_tum(
@@ -435,7 +435,7 @@ vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_tum(
@@ -444,7 +444,7 @@ vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_tum(
@@ -453,7 +453,7 @@ vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_tum(
@@ -462,7 +462,7 @@ vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_tum(
@@ -471,7 +471,7 @@ vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tum(
@@ -480,7 +480,7 @@ vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tum(
@@ -489,7 +489,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_tum(
@@ -498,7 +498,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_tum(
@@ -507,7 +507,7 @@ vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_tum(
@@ -516,7 +516,7 @@ vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_tum(
@@ -525,7 +525,7 @@ vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_tum(
@@ -534,7 +534,7 @@ vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_tum(
@@ -543,7 +543,7 @@ vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_tumu(
@@ -552,7 +552,7 @@ vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_tumu(
@@ -561,7 +561,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_tumu(
@@ -570,7 +570,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_tumu(
@@ -579,7 +579,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_tumu(
@@ -588,7 +588,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_tumu(
@@ -597,7 +597,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_tumu(
@@ -606,7 +606,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_tumu(
@@ -615,7 +615,7 @@ vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_tumu(
@@ -624,7 +624,7 @@ vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_tumu(
@@ -633,7 +633,7 @@ vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_tumu(
@@ -642,7 +642,7 @@ vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_tumu(
@@ -651,7 +651,7 @@ vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_tumu(
@@ -660,7 +660,7 @@ vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_tumu(
@@ -669,7 +669,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_tumu(
@@ -678,7 +678,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_tumu(
@@ -687,7 +687,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_tumu(
@@ -696,7 +696,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_tumu(
@@ -705,7 +705,7 @@ vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_tumu(
@@ -714,7 +714,7 @@ vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_tumu(
@@ -723,7 +723,7 @@ vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_tumu(
@@ -732,7 +732,7 @@ vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_tumu(
@@ -741,7 +741,7 @@ vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tumu(
@@ -750,7 +750,7 @@ vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tumu(
@@ -759,7 +759,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_tumu(
@@ -768,7 +768,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_tumu(
@@ -777,7 +777,7 @@ vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_tumu(
@@ -786,7 +786,7 @@ vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_tumu(
@@ -795,7 +795,7 @@ vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_tumu(
@@ -804,7 +804,7 @@ vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_tumu(
@@ -813,7 +813,7 @@ vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_mu(
@@ -822,7 +822,7 @@ vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_mu(
@@ -831,7 +831,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_mu(
@@ -840,7 +840,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_mu(
@@ -849,7 +849,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_mu(
@@ -858,7 +858,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_mu(
@@ -867,7 +867,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_mu(
@@ -876,7 +876,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_mu(
@@ -885,7 +885,7 @@ vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_mu(
@@ -894,7 +894,7 @@ vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_mu(
@@ -903,7 +903,7 @@ vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_mu(
@@ -912,7 +912,7 @@ vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_mu(
@@ -921,7 +921,7 @@ vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_mu(
@@ -930,7 +930,7 @@ vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_mu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_mu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_mu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_mu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_mu(
@@ -975,7 +975,7 @@ vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_mu(
@@ -984,7 +984,7 @@ vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_mu(
@@ -993,7 +993,7 @@ vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_mu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_mu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_mu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_mu(
@@ -1029,7 +1029,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_mu(
@@ -1038,7 +1038,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_mu(
@@ -1047,7 +1047,7 @@ vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_mu(
@@ -1056,7 +1056,7 @@ vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_mu(
@@ -1065,7 +1065,7 @@ vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_mu(
@@ -1074,7 +1074,7 @@ vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_mu(
@@ -1083,6 +1083,6 @@ vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
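
A minimal sketch (illustrative, not part of the patch; the wrapper names clip_tu and clip_mu are invented here) contrasting two of the renamed policy overloads exercised by the tests above, with argument types taken from the i32m4 cases:

#include <riscv_vector.h>

/* Unmasked, tail-undisturbed narrowing clip: each 64-bit lane of `op1` is
 * shifted right by `shift` with rounding and signed saturation into a 32-bit
 * lane; tail lanes keep their values from `maskedoff`.
 * Before this patch the overloaded call was spelled vnclip_tu(...). */
vint32m4_t clip_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
  return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
}

/* Masked, mask-undisturbed variant: lanes where `mask` is clear keep their
 * values from `maskedoff`. Previously spelled vnclip_mu(...). */
vint32m4_t clip_mu(vbool8_t mask, vint32m4_t maskedoff,
                   vint64m8_t op1, size_t shift, size_t vl) {
  return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
}
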
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclipu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclipu.c
index 3cc72581552e..b96bc483074f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclipu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnclipu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_tu(
@@ -129,7 +129,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_tu(
@@ -138,7 +138,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_tu(
@@ -147,7 +147,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_tu(
@@ -156,7 +156,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_tu(
@@ -165,7 +165,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_tu(
@@ -174,7 +174,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_tu(
@@ -183,7 +183,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_tu(
@@ -192,7 +192,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_tu(
@@ -201,7 +201,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tu(
@@ -210,7 +210,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tu(
@@ -219,7 +219,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_tu(
@@ -228,7 +228,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_tu(
@@ -237,7 +237,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_tu(
@@ -246,7 +246,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_tu(
@@ -255,7 +255,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_tu(
@@ -264,7 +264,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_tu(
@@ -273,7 +273,7 @@ vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_tum(
@@ -282,7 +282,7 @@ vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_tum(
@@ -291,7 +291,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_tum(
@@ -300,7 +300,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_tum(
@@ -309,7 +309,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_tum(
@@ -318,7 +318,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_tum(
@@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_tum(
@@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_tum(
@@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_tum(
@@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_tum(
@@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_tum(
@@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_tum(
@@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_tum(
@@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_tum(
@@ -399,7 +399,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_tum(
@@ -408,7 +408,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_tum(
@@ -417,7 +417,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_tum(
@@ -426,7 +426,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_tum(
@@ -435,7 +435,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_tum(
@@ -444,7 +444,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_tum(
@@ -453,7 +453,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_tum(
@@ -462,7 +462,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_tum(
@@ -471,7 +471,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tum(
@@ -480,7 +480,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tum(
@@ -489,7 +489,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_tum(
@@ -498,7 +498,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_tum(
@@ -507,7 +507,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_tum(
@@ -516,7 +516,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_tum(
@@ -525,7 +525,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_tum(
@@ -534,7 +534,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_tum(
@@ -543,7 +543,7 @@ vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_tumu(
@@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_tumu(
@@ -561,7 +561,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_tumu(
@@ -570,7 +570,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_tumu(
@@ -579,7 +579,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_tumu(
@@ -588,7 +588,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_tumu(
@@ -597,7 +597,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_tumu(
@@ -606,7 +606,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_tumu(
@@ -615,7 +615,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_tumu(
@@ -624,7 +624,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_tumu(
@@ -633,7 +633,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_tumu(
@@ -642,7 +642,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_tumu(
@@ -651,7 +651,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_tumu(
@@ -660,7 +660,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_tumu(
@@ -669,7 +669,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_tumu(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_tumu(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_tumu(
@@ -696,7 +696,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_tumu(
@@ -705,7 +705,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_tumu(
@@ -714,7 +714,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_tumu(
@@ -723,7 +723,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_tumu(
@@ -732,7 +732,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_tumu(
@@ -741,7 +741,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tumu(
@@ -750,7 +750,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tumu(
@@ -759,7 +759,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_tumu(
@@ -768,7 +768,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_tumu(
@@ -777,7 +777,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_tumu(
@@ -786,7 +786,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_tumu(
@@ -795,7 +795,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_tumu(
@@ -804,7 +804,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_tumu(
@@ -813,7 +813,7 @@ vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_mu(
@@ -822,7 +822,7 @@ vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_mu(
@@ -831,7 +831,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_mu(
@@ -840,7 +840,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_mu(
@@ -849,7 +849,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_mu(
@@ -858,7 +858,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_mu(
@@ -867,7 +867,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_mu(
@@ -876,7 +876,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_mu(
@@ -885,7 +885,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_mu(
@@ -894,7 +894,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_mu(
@@ -903,7 +903,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_mu(
@@ -912,7 +912,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_mu(
@@ -921,7 +921,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_mu(
@@ -930,7 +930,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_mu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_mu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_mu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_mu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_mu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_mu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_mu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_mu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_mu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_mu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_mu(
@@ -1029,7 +1029,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_mu(
@@ -1038,7 +1038,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_mu(
@@ -1047,7 +1047,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_mu(
@@ -1056,7 +1056,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_mu(
@@ -1065,7 +1065,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_mu(
@@ -1074,7 +1074,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_mu(
@@ -1083,6 +1083,6 @@ vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
}
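The unsigned variant follows the same pattern; a sketch of a tail-undisturbed caller taken from the `_tu` signatures in the vnclipu tests above (narrow_clipu_tu is a hypothetical name, same compiler assumption as before):

#include <riscv_vector.h>

// Unsigned narrowing clip with the tail-undisturbed policy: elements past
// vl keep the values of the maskedoff operand. Only the __riscv_ prefix
// is new (previously: vnclipu_tu(...)).
vuint8mf8_t narrow_clipu_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1,
                            vuint8mf8_t shift, size_t vl) {
  return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
}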
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vncvt.c
index e5935e5f941f..06015c91faf3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vncvt.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_tu(
@@ -30,7 +30,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vncvt_x_x_w_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_tu(
@@ -39,7 +39,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vncvt_x_x_w_i8m1_tu(vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_tu(vint8m1_t maskedoff, vint16m2_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vncvt_x_x_w_i8m2_tu(vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_tu(
@@ -57,7 +57,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_tu(vint8m2_t maskedoff, vint16m4_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vncvt_x_x_w_i8m4_tu(vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_tu(
@@ -66,7 +66,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_tu(vint8m4_t maskedoff, vint16m8_t src, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_tu(
@@ -75,7 +75,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_tu(
@@ -84,7 +84,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_tu(
@@ -93,7 +93,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t src, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vncvt_x_x_w_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_tu(
@@ -102,7 +102,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vncvt_x_x_w_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vncvt_x_x_w_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_tu(
@@ -129,7 +129,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_tu(
@@ -138,7 +138,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vncvt_x_x_w_i16m1_tu(vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_tu(
@@ -147,7 +147,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_tu(vint16m1_t maskedoff, vint32m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vncvt_x_x_w_i16m2_tu(vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_tu(
@@ -156,7 +156,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_tu(vint16m2_t maskedoff, vint32m4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4_tu(vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tu(
@@ -165,7 +165,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_tu(vint16m4_t maskedoff, vint32m8_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_tu(
@@ -174,7 +174,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t src
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_tu(
@@ -210,7 +210,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t src, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_tu(
@@ -219,7 +219,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t src, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1_tu(vint32m1_t maskedoff, vint64m2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_tu(
@@ -228,7 +228,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_tu(vint32m1_t maskedoff, vint64m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2_tu(vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_tu(
@@ -237,7 +237,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_tu(vint32m2_t maskedoff, vint64m4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4_tu(vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_tu(
@@ -246,7 +246,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_tu(vint32m4_t maskedoff, vint64m8_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_tu(
@@ -264,7 +264,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_tu(
@@ -273,7 +273,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
- return vncvt_x_tu(maskedoff, src, vl);
+ return __riscv_vncvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_tum(
@@ -282,7 +282,7 @@ vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t src, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_tum(
@@ -291,7 +291,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vncvt_x_x_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_tum(
@@ -300,7 +300,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_tum(
@@ -309,7 +309,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_tum(
@@ -318,7 +318,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_tum(
@@ -327,7 +327,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_tum(
@@ -336,7 +336,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_tum(
@@ -345,7 +345,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_tum(
@@ -354,7 +354,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_tum(
@@ -363,7 +363,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_tum(
@@ -372,7 +372,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_tum(
@@ -381,7 +381,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tum(
@@ -390,7 +390,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_tum(
@@ -399,7 +399,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_tum(
@@ -408,7 +408,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_tum(
@@ -417,7 +417,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_tum(
@@ -426,7 +426,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tum(
@@ -435,7 +435,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_tum(
@@ -444,7 +444,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_tum(
@@ -453,7 +453,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_tum(
@@ -462,7 +462,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_tum(
@@ -471,7 +471,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_tum(
@@ -480,7 +480,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_tum(
@@ -489,7 +489,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_tum(
@@ -498,7 +498,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_tum(
@@ -507,7 +507,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_tum(
@@ -516,7 +516,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_tum(
@@ -525,7 +525,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_tum(
@@ -534,7 +534,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_tum(
@@ -543,7 +543,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
- return vncvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_tumu(
@@ -552,7 +552,7 @@ vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_tumu(
@@ -561,7 +561,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_tumu(
@@ -570,7 +570,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_tumu(
@@ -579,7 +579,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_tumu(
@@ -588,7 +588,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_tumu(
@@ -597,7 +597,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_tumu(
@@ -606,7 +606,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_tumu(
@@ -615,7 +615,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vncvt_x_x_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_tumu(
@@ -624,7 +624,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_tumu(
@@ -633,7 +633,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_tumu(
@@ -642,7 +642,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vncvt_x_x_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_tumu(
@@ -651,7 +651,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tumu(
@@ -660,7 +660,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_tumu(
@@ -669,7 +669,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_tumu(
@@ -678,7 +678,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_tumu(
@@ -687,7 +687,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_tumu(
@@ -696,7 +696,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tumu(
@@ -705,7 +705,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_tumu(
@@ -714,7 +714,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_tumu(
@@ -723,7 +723,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_tumu(
@@ -732,7 +732,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_tumu(
@@ -741,7 +741,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_tumu(
@@ -750,7 +750,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_tumu(
@@ -759,7 +759,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_tumu(
@@ -768,7 +768,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_tumu(
@@ -777,7 +777,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_tumu(
@@ -786,7 +786,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_tumu(
@@ -795,7 +795,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_tumu(
@@ -804,7 +804,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_tumu(
@@ -813,7 +813,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
- return vncvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_mu(
@@ -822,7 +822,7 @@ vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_mu(
@@ -831,7 +831,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vncvt_x_x_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_mu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_mu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_mu(
@@ -858,7 +858,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_mu(
@@ -867,7 +867,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_mu(
@@ -876,7 +876,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_mu(
@@ -885,7 +885,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_mu(
@@ -894,7 +894,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_mu(
@@ -903,7 +903,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_mu(
@@ -912,7 +912,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_mu(
@@ -921,7 +921,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_mu(
@@ -930,7 +930,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_mu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_mu(
@@ -948,7 +948,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_mu(
@@ -957,7 +957,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_mu(
@@ -966,7 +966,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_mu(
@@ -975,7 +975,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_mu(
@@ -984,7 +984,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_mu(
@@ -993,7 +993,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_mu(
@@ -1002,7 +1002,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_mu(
@@ -1011,7 +1011,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_mu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_mu(
@@ -1029,7 +1029,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_mu(
@@ -1038,7 +1038,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_mu(
@@ -1047,7 +1047,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_mu(
@@ -1056,7 +1056,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_mu(
@@ -1065,7 +1065,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_mu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_mu(
@@ -1083,6 +1083,6 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
- return vncvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
}
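
The hunks above apply one mechanical rename: every overloaded RVV intrinsic call gains the `__riscv_` prefix, while the `_tu`/`_tum`/`_tumu`/`_mu` policy suffixes and argument lists stay unchanged. As a minimal sketch of calling the renamed overloads outside the test harness, assuming `<riscv_vector.h>` and a Clang build with the V extension enabled (function names here are illustrative, not part of the patch; types and intrinsic signatures are taken verbatim from the tests above):

#include <riscv_vector.h>
#include <stddef.h>

/* Unmasked narrowing convert, tail-undisturbed policy: elements past vl
   keep the values supplied by maskedoff. */
vuint8mf4_t narrow_u16_to_u8_tu(vuint8mf4_t maskedoff, vuint16mf2_t src,
                                size_t vl) {
  return __riscv_vncvt_x_tu(maskedoff, src, vl);
}

/* Masked variant with tail-undisturbed policy: tail elements are merged
   from maskedoff. The overload resolves on the operand types, so no
   per-type suffix (e.g. _u8mf4) is spelled out at the call site. */
vuint8mf4_t narrow_u16_to_u8_tum(vbool32_t mask, vuint8mf4_t maskedoff,
                                 vuint16mf2_t src, size_t vl) {
  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
}
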
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vneg.c
index 91f709a9f4cc..63c88483b9a8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vneg.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4_tu(
@@ -22,7 +22,7 @@ vint8mf8_t test_vneg_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2_tu(
@@ -31,7 +31,7 @@ vint8mf4_t test_vneg_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m1_tu(
@@ -40,7 +40,7 @@ vint8mf2_t test_vneg_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m2_tu(
@@ -49,7 +49,7 @@ vint8m1_t test_vneg_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m4_tu(
@@ -58,7 +58,7 @@ vint8m2_t test_vneg_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m8_tu(
@@ -67,7 +67,7 @@ vint8m4_t test_vneg_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_tu(
@@ -76,7 +76,7 @@ vint8m8_t test_vneg_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2_tu(
@@ -85,7 +85,7 @@ vint16mf4_t test_vneg_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m1_tu(
@@ -94,7 +94,7 @@ vint16mf2_t test_vneg_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m2_tu(
@@ -103,7 +103,7 @@ vint16m1_t test_vneg_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m4_tu(
@@ -112,7 +112,7 @@ vint16m2_t test_vneg_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m8_tu(
@@ -121,7 +121,7 @@ vint16m4_t test_vneg_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tu(
@@ -130,7 +130,7 @@ vint16m8_t test_vneg_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m1_tu(
@@ -139,7 +139,7 @@ vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m2_tu(
@@ -148,7 +148,7 @@ vint32m1_t test_vneg_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m4_tu(
@@ -157,7 +157,7 @@ vint32m2_t test_vneg_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m8_tu(
@@ -166,7 +166,7 @@ vint32m4_t test_vneg_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m1_tu(
@@ -175,7 +175,7 @@ vint32m8_t test_vneg_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m2_tu(
@@ -184,7 +184,7 @@ vint64m1_t test_vneg_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m4_tu(
@@ -193,7 +193,7 @@ vint64m2_t test_vneg_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m8_tu(
@@ -202,7 +202,7 @@ vint64m4_t test_vneg_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vneg_tu(maskedoff, op1, vl);
+ return __riscv_vneg_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8_tum(
@@ -211,7 +211,7 @@ vint64m8_t test_vneg_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4_tum(
@@ -220,7 +220,7 @@ vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2_tum(
@@ -229,7 +229,7 @@ vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m1_tum(
@@ -238,7 +238,7 @@ vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m2_tum(
@@ -247,7 +247,7 @@ vint8m1_t test_vneg_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m4_tum(
@@ -256,7 +256,7 @@ vint8m2_t test_vneg_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m8_tum(
@@ -265,7 +265,7 @@ vint8m4_t test_vneg_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_tum(
@@ -274,7 +274,7 @@ vint8m8_t test_vneg_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2_tum(
@@ -283,7 +283,7 @@ vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m1_tum(
@@ -292,7 +292,7 @@ vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m2_tum(
@@ -301,7 +301,7 @@ vint16m1_t test_vneg_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m4_tum(
@@ -310,7 +310,7 @@ vint16m2_t test_vneg_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m8_tum(
@@ -319,7 +319,7 @@ vint16m4_t test_vneg_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tum(
@@ -328,7 +328,7 @@ vint16m8_t test_vneg_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m1_tum(
@@ -337,7 +337,7 @@ vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m2_tum(
@@ -346,7 +346,7 @@ vint32m1_t test_vneg_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m4_tum(
@@ -355,7 +355,7 @@ vint32m2_t test_vneg_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m8_tum(
@@ -364,7 +364,7 @@ vint32m4_t test_vneg_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m1_tum(
@@ -373,7 +373,7 @@ vint32m8_t test_vneg_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m2_tum(
@@ -382,7 +382,7 @@ vint64m1_t test_vneg_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m4_tum(
@@ -391,7 +391,7 @@ vint64m2_t test_vneg_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m8_tum(
@@ -400,7 +400,7 @@ vint64m4_t test_vneg_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vneg_tum(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8_tumu(
@@ -409,7 +409,7 @@ vint64m8_t test_vneg_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4_tumu(
@@ -418,7 +418,7 @@ vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2_tumu(
@@ -427,7 +427,7 @@ vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m1_tumu(
@@ -436,7 +436,7 @@ vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m2_tumu(
@@ -445,7 +445,7 @@ vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m4_tumu(
@@ -454,7 +454,7 @@ vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m8_tumu(
@@ -463,7 +463,7 @@ vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_tumu(
@@ -472,7 +472,7 @@ vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2_tumu(
@@ -481,7 +481,7 @@ vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m1_tumu(
@@ -490,7 +490,7 @@ vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m2_tumu(
@@ -499,7 +499,7 @@ vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m4_tumu(
@@ -508,7 +508,7 @@ vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m8_tumu(
@@ -517,7 +517,7 @@ vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tumu(
@@ -526,7 +526,7 @@ vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m1_tumu(
@@ -535,7 +535,7 @@ vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m2_tumu(
@@ -544,7 +544,7 @@ vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m4_tumu(
@@ -553,7 +553,7 @@ vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m8_tumu(
@@ -562,7 +562,7 @@ vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m1_tumu(
@@ -571,7 +571,7 @@ vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m2_tumu(
@@ -580,7 +580,7 @@ vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m4_tumu(
@@ -589,7 +589,7 @@ vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m8_tumu(
@@ -598,7 +598,7 @@ vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vneg_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8_mu(
@@ -607,7 +607,7 @@ vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4_mu(
@@ -616,7 +616,7 @@ vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2_mu(
@@ -625,7 +625,7 @@ vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m1_mu(
@@ -634,7 +634,7 @@ vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m2_mu(
@@ -643,7 +643,7 @@ vint8m1_t test_vneg_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m4_mu(
@@ -652,7 +652,7 @@ vint8m2_t test_vneg_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i8m8_mu(
@@ -661,7 +661,7 @@ vint8m4_t test_vneg_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_mu(
@@ -670,7 +670,7 @@ vint8m8_t test_vneg_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2_mu(
@@ -679,7 +679,7 @@ vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m1_mu(
@@ -688,7 +688,7 @@ vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m2_mu(
@@ -697,7 +697,7 @@ vint16m1_t test_vneg_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m4_mu(
@@ -706,7 +706,7 @@ vint16m2_t test_vneg_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i16m8_mu(
@@ -715,7 +715,7 @@ vint16m4_t test_vneg_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_mu(
@@ -724,7 +724,7 @@ vint16m8_t test_vneg_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m1_mu(
@@ -733,7 +733,7 @@ vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m2_mu(
@@ -742,7 +742,7 @@ vint32m1_t test_vneg_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m4_mu(
@@ -751,7 +751,7 @@ vint32m2_t test_vneg_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i32m8_mu(
@@ -760,7 +760,7 @@ vint32m4_t test_vneg_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m1_mu(
@@ -769,7 +769,7 @@ vint32m8_t test_vneg_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m2_mu(
@@ -778,7 +778,7 @@ vint64m1_t test_vneg_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m4_mu(
@@ -787,7 +787,7 @@ vint64m2_t test_vneg_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vneg_v_i64m8_mu(
@@ -796,6 +796,6 @@ vint64m4_t test_vneg_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vneg_mu(mask, maskedoff, op1, vl);
+ return __riscv_vneg_mu(mask, maskedoff, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsac.c
index 12a1b38fd1c0..f21f138cc55b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsac.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_tu(
@@ -22,7 +22,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_tu(
@@ -31,7 +31,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_tu(
@@ -40,7 +40,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_tu(
@@ -49,7 +49,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_tu(
@@ -58,7 +58,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_tu(
@@ -67,7 +67,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_tu(
@@ -76,7 +76,7 @@ vint8m1_t test_vnmsac_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_tu(
@@ -85,7 +85,7 @@ vint8m1_t test_vnmsac_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_tu(
@@ -94,7 +94,7 @@ vint8m2_t test_vnmsac_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_tu(
@@ -103,7 +103,7 @@ vint8m2_t test_vnmsac_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_tu(
@@ -112,7 +112,7 @@ vint8m4_t test_vnmsac_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_tu(
@@ -121,7 +121,7 @@ vint8m4_t test_vnmsac_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_tu(
@@ -130,7 +130,7 @@ vint8m8_t test_vnmsac_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_tu(
@@ -139,7 +139,7 @@ vint8m8_t test_vnmsac_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_tu(
@@ -148,7 +148,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_tu(
@@ -157,7 +157,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_tu(
@@ -166,7 +166,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_tu(
@@ -175,7 +175,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_tu(
@@ -184,7 +184,7 @@ vint16m1_t test_vnmsac_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_tu(
@@ -193,7 +193,7 @@ vint16m1_t test_vnmsac_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_tu(
@@ -202,7 +202,7 @@ vint16m2_t test_vnmsac_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_tu(
@@ -211,7 +211,7 @@ vint16m2_t test_vnmsac_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_tu(
@@ -220,7 +220,7 @@ vint16m4_t test_vnmsac_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_tu(
@@ -229,7 +229,7 @@ vint16m4_t test_vnmsac_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_tu(
@@ -238,7 +238,7 @@ vint16m8_t test_vnmsac_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tu(
@@ -247,7 +247,7 @@ vint16m8_t test_vnmsac_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tu(
@@ -256,7 +256,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_tu(
@@ -265,7 +265,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_tu(
@@ -274,7 +274,7 @@ vint32m1_t test_vnmsac_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vnmsac_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vnmsac_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_tu(
@@ -301,7 +301,7 @@ vint32m2_t test_vnmsac_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_tu(
@@ -310,7 +310,7 @@ vint32m4_t test_vnmsac_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_tu(
@@ -319,7 +319,7 @@ vint32m4_t test_vnmsac_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_tu(
@@ -328,7 +328,7 @@ vint32m8_t test_vnmsac_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_tu(
@@ -337,7 +337,7 @@ vint32m8_t test_vnmsac_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_tu(
@@ -346,7 +346,7 @@ vint64m1_t test_vnmsac_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_tu(
@@ -355,7 +355,7 @@ vint64m1_t test_vnmsac_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_tu(
@@ -364,7 +364,7 @@ vint64m2_t test_vnmsac_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_tu(
@@ -373,7 +373,7 @@ vint64m2_t test_vnmsac_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_tu(
@@ -382,7 +382,7 @@ vint64m4_t test_vnmsac_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_tu(
@@ -391,7 +391,7 @@ vint64m4_t test_vnmsac_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_tu(
@@ -400,7 +400,7 @@ vint64m8_t test_vnmsac_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_tu(
@@ -409,7 +409,7 @@ vint64m8_t test_vnmsac_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_tu(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_tu(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_tu(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_tu(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_tu(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_tu(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_tu(
@@ -472,7 +472,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_tu(
@@ -481,7 +481,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_tu(
@@ -490,7 +490,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_tu(
@@ -499,7 +499,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_tu(
@@ -508,7 +508,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_tu(
@@ -517,7 +517,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_tu(
@@ -526,7 +526,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_tu(
@@ -535,7 +535,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_tu(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_tu(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_tu(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_tu(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_tu(
@@ -580,7 +580,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_tu(
@@ -589,7 +589,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_tu(
@@ -598,7 +598,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_tu(
@@ -607,7 +607,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_tu(
@@ -616,7 +616,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_tu(
@@ -625,7 +625,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_tu(
@@ -634,7 +634,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tu(
@@ -643,7 +643,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tu(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_tu(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_tu(
@@ -670,7 +670,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_tu(
@@ -679,7 +679,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_tu(
@@ -688,7 +688,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_tu(
@@ -697,7 +697,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_tu(
@@ -706,7 +706,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_tu(
@@ -715,7 +715,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_tu(
@@ -724,7 +724,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_tu(
@@ -733,7 +733,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_tu(
@@ -742,7 +742,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_tu(
@@ -751,7 +751,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_tu(
@@ -760,7 +760,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_tu(
@@ -769,7 +769,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_tu(
@@ -778,7 +778,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_tu(
@@ -787,7 +787,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_tu(
@@ -796,7 +796,7 @@ vuint64m8_t test_vnmsac_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_tum(
@@ -805,7 +805,7 @@ vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_tum(
@@ -814,7 +814,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_tum(
@@ -823,7 +823,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_tum(
@@ -832,7 +832,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_tum(
@@ -841,7 +841,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_tum(
@@ -850,7 +850,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_tum(
@@ -859,7 +859,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_tum(
@@ -868,7 +868,7 @@ vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_tum(
@@ -877,7 +877,7 @@ vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_tum(
@@ -886,7 +886,7 @@ vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_tum(
@@ -895,7 +895,7 @@ vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_tum(
@@ -904,7 +904,7 @@ vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_tum(
@@ -913,7 +913,7 @@ vint8m4_t test_vnmsac_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_tum(
@@ -922,7 +922,7 @@ vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_tum(
@@ -931,7 +931,7 @@ vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_tum(
@@ -940,7 +940,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_tum(
@@ -949,7 +949,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_tum(
@@ -958,7 +958,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_tum(
@@ -967,7 +967,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_tum(
@@ -976,7 +976,7 @@ vint16m1_t test_vnmsac_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_tum(
@@ -985,7 +985,7 @@ vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_tum(
@@ -994,7 +994,7 @@ vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_tum(
@@ -1003,7 +1003,7 @@ vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_tum(
@@ -1012,7 +1012,7 @@ vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_tum(
@@ -1021,7 +1021,7 @@ vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_tum(
@@ -1030,7 +1030,7 @@ vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tum(
@@ -1039,7 +1039,7 @@ vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tum(
@@ -1048,7 +1048,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_tum(
@@ -1057,7 +1057,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_tum(
@@ -1066,7 +1066,7 @@ vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_tum(
@@ -1075,7 +1075,7 @@ vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_tum(
@@ -1084,7 +1084,7 @@ vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_tum(
@@ -1093,7 +1093,7 @@ vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_tum(
@@ -1102,7 +1102,7 @@ vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_tum(
@@ -1111,7 +1111,7 @@ vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_tum(
@@ -1120,7 +1120,7 @@ vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_tum(
@@ -1129,7 +1129,7 @@ vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_tum(
@@ -1138,7 +1138,7 @@ vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_tum(
@@ -1147,7 +1147,7 @@ vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_tum(
@@ -1156,7 +1156,7 @@ vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_tum(
@@ -1165,7 +1165,7 @@ vint64m2_t test_vnmsac_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_tum(
@@ -1174,7 +1174,7 @@ vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_tum(
@@ -1183,7 +1183,7 @@ vint64m4_t test_vnmsac_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_tum(
@@ -1192,7 +1192,7 @@ vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_tum(
@@ -1201,7 +1201,7 @@ vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_tum(
@@ -1210,7 +1210,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_tum(
@@ -1219,7 +1219,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_tum(
@@ -1228,7 +1228,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_tum(
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_tum(
@@ -1246,7 +1246,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_tum(
@@ -1255,7 +1255,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_tum(
@@ -1264,7 +1264,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_tum(
@@ -1273,7 +1273,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_tum(
@@ -1282,7 +1282,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_tum(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_tum(
@@ -1300,7 +1300,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_tum(
@@ -1309,7 +1309,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_tum(
@@ -1318,7 +1318,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_tum(
@@ -1327,7 +1327,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_tum(
@@ -1336,7 +1336,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_tum(
@@ -1345,7 +1345,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_tum(
@@ -1354,7 +1354,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_tum(
@@ -1363,7 +1363,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_tum(
@@ -1372,7 +1372,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_tum(
@@ -1381,7 +1381,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_tum(
@@ -1390,7 +1390,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_tum(
@@ -1399,7 +1399,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_tum(
@@ -1408,7 +1408,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_tum(
@@ -1417,7 +1417,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_tum(
@@ -1426,7 +1426,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tum(
@@ -1435,7 +1435,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tum(
@@ -1444,7 +1444,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_tum(
@@ -1453,7 +1453,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_tum(
@@ -1462,7 +1462,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_tum(
@@ -1471,7 +1471,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_tum(
@@ -1480,7 +1480,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_tum(
@@ -1489,7 +1489,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_tum(
@@ -1498,7 +1498,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_tum(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_tum(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_tum(
@@ -1525,7 +1525,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_tum(
@@ -1534,7 +1534,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_tum(
@@ -1543,7 +1543,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_tum(
@@ -1552,7 +1552,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_tum(
@@ -1561,7 +1561,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_tum(
@@ -1570,7 +1570,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_tum(
@@ -1579,7 +1579,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_tum(
@@ -1588,7 +1588,7 @@ vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_tumu(
@@ -1597,7 +1597,7 @@ vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_tumu(
@@ -1606,7 +1606,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_tumu(
@@ -1615,7 +1615,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_tumu(
@@ -1624,7 +1624,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_tumu(
@@ -1633,7 +1633,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_tumu(
@@ -1642,7 +1642,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_tumu(
@@ -1651,7 +1651,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_tumu(
@@ -1660,7 +1660,7 @@ vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_tumu(
@@ -1669,7 +1669,7 @@ vint8m1_t test_vnmsac_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_tumu(
@@ -1678,7 +1678,7 @@ vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_tumu(
@@ -1687,7 +1687,7 @@ vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_tumu(
@@ -1696,7 +1696,7 @@ vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_tumu(
@@ -1705,7 +1705,7 @@ vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_tumu(
@@ -1714,7 +1714,7 @@ vint8m8_t test_vnmsac_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_tumu(
@@ -1723,7 +1723,7 @@ vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_tumu(
@@ -1732,7 +1732,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_tumu(
@@ -1741,7 +1741,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_tumu(
@@ -1750,7 +1750,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_tumu(
@@ -1759,7 +1759,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_tumu(
@@ -1768,7 +1768,7 @@ vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_tumu(
@@ -1777,7 +1777,7 @@ vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_tumu(
@@ -1786,7 +1786,7 @@ vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_tumu(
@@ -1795,7 +1795,7 @@ vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_tumu(
@@ -1804,7 +1804,7 @@ vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_tumu(
@@ -1813,7 +1813,7 @@ vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_tumu(
@@ -1822,7 +1822,7 @@ vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tumu(
@@ -1831,7 +1831,7 @@ vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tumu(
@@ -1840,7 +1840,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_tumu(
@@ -1849,7 +1849,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_tumu(
@@ -1858,7 +1858,7 @@ vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_tumu(
@@ -1867,7 +1867,7 @@ vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_tumu(
@@ -1876,7 +1876,7 @@ vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_tumu(
@@ -1885,7 +1885,7 @@ vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_tumu(
@@ -1894,7 +1894,7 @@ vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_tumu(
@@ -1903,7 +1903,7 @@ vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_tumu(
@@ -1912,7 +1912,7 @@ vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_tumu(
@@ -1921,7 +1921,7 @@ vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_tumu(
@@ -1930,7 +1930,7 @@ vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_tumu(
@@ -1939,7 +1939,7 @@ vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_tumu(
@@ -1948,7 +1948,7 @@ vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_tumu(
@@ -1957,7 +1957,7 @@ vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_tumu(
@@ -1966,7 +1966,7 @@ vint64m4_t test_vnmsac_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_tumu(
@@ -1975,7 +1975,7 @@ vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_tumu(
@@ -1984,7 +1984,7 @@ vint64m8_t test_vnmsac_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_tumu(
@@ -1993,7 +1993,7 @@ vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_tumu(
@@ -2002,7 +2002,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_tumu(
@@ -2011,7 +2011,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_tumu(
@@ -2020,7 +2020,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_tumu(
@@ -2029,7 +2029,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_tumu(
@@ -2038,7 +2038,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_tumu(
@@ -2047,7 +2047,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_tumu(
@@ -2056,7 +2056,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_tumu(
@@ -2065,7 +2065,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_tumu(
@@ -2074,7 +2074,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_tumu(
@@ -2083,7 +2083,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_tumu(
@@ -2092,7 +2092,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_tumu(
@@ -2101,7 +2101,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_tumu(
@@ -2110,7 +2110,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_tumu(
@@ -2119,7 +2119,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_tumu(
@@ -2128,7 +2128,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_tumu(
@@ -2137,7 +2137,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_tumu(
@@ -2146,7 +2146,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_tumu(
@@ -2155,7 +2155,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_tumu(
@@ -2164,7 +2164,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_tumu(
@@ -2173,7 +2173,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_tumu(
@@ -2182,7 +2182,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_tumu(
@@ -2191,7 +2191,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_tumu(
@@ -2200,7 +2200,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_tumu(
@@ -2209,7 +2209,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_tumu(
@@ -2218,7 +2218,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tumu(
@@ -2227,7 +2227,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tumu(
@@ -2236,7 +2236,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_tumu(
@@ -2245,7 +2245,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_tumu(
@@ -2254,7 +2254,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_tumu(
@@ -2263,7 +2263,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_tumu(
@@ -2272,7 +2272,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_tumu(
@@ -2281,7 +2281,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_tumu(
@@ -2290,7 +2290,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_tumu(
@@ -2299,7 +2299,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_tumu(
@@ -2308,7 +2308,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_tumu(
@@ -2317,7 +2317,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_tumu(
@@ -2326,7 +2326,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_tumu(
@@ -2335,7 +2335,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_tumu(
@@ -2344,7 +2344,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_tumu(
@@ -2353,7 +2353,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_tumu(
@@ -2362,7 +2362,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_tumu(
@@ -2371,7 +2371,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_tumu(
@@ -2380,7 +2380,7 @@ vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_mu(
@@ -2389,7 +2389,7 @@ vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_mu(
@@ -2398,7 +2398,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_mu(
@@ -2407,7 +2407,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_mu(
@@ -2416,7 +2416,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_mu(
@@ -2425,7 +2425,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_mu(
@@ -2434,7 +2434,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_mu(
@@ -2443,7 +2443,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_mu(
@@ -2452,7 +2452,7 @@ vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_mu(
@@ -2461,7 +2461,7 @@ vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_mu(
@@ -2470,7 +2470,7 @@ vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_mu(
@@ -2479,7 +2479,7 @@ vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_mu(
@@ -2488,7 +2488,7 @@ vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_mu(
@@ -2497,7 +2497,7 @@ vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_mu(
@@ -2506,7 +2506,7 @@ vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_mu(
@@ -2515,7 +2515,7 @@ vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_mu(
@@ -2524,7 +2524,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_mu(
@@ -2533,7 +2533,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_mu(
@@ -2542,7 +2542,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_mu(
@@ -2551,7 +2551,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_mu(
@@ -2560,7 +2560,7 @@ vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_mu(
@@ -2569,7 +2569,7 @@ vint16m1_t test_vnmsac_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_mu(
@@ -2578,7 +2578,7 @@ vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_mu(
@@ -2587,7 +2587,7 @@ vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_mu(
@@ -2596,7 +2596,7 @@ vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_mu(
@@ -2605,7 +2605,7 @@ vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_mu(
@@ -2614,7 +2614,7 @@ vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_mu(
@@ -2623,7 +2623,7 @@ vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_mu(
@@ -2632,7 +2632,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_mu(
@@ -2641,7 +2641,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_mu(
@@ -2650,7 +2650,7 @@ vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_mu(
@@ -2659,7 +2659,7 @@ vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_mu(
@@ -2668,7 +2668,7 @@ vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_mu(
@@ -2677,7 +2677,7 @@ vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_mu(
@@ -2686,7 +2686,7 @@ vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_mu(
@@ -2695,7 +2695,7 @@ vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_mu(
@@ -2704,7 +2704,7 @@ vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_mu(
@@ -2713,7 +2713,7 @@ vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_mu(
@@ -2722,7 +2722,7 @@ vint64m1_t test_vnmsac_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_mu(
@@ -2731,7 +2731,7 @@ vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_mu(
@@ -2740,7 +2740,7 @@ vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_mu(
@@ -2749,7 +2749,7 @@ vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_mu(
@@ -2758,7 +2758,7 @@ vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_mu(
@@ -2767,7 +2767,7 @@ vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_mu(
@@ -2776,7 +2776,7 @@ vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_mu(
@@ -2785,7 +2785,7 @@ vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_mu(
@@ -2794,7 +2794,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_mu(
@@ -2803,7 +2803,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_mu(
@@ -2812,7 +2812,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_mu(
@@ -2821,7 +2821,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_mu(
@@ -2830,7 +2830,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_mu(
@@ -2839,7 +2839,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_mu(
@@ -2848,7 +2848,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_mu(
@@ -2857,7 +2857,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_mu(
@@ -2866,7 +2866,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_mu(
@@ -2875,7 +2875,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_mu(
@@ -2884,7 +2884,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_mu(
@@ -2893,7 +2893,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_mu(
@@ -2902,7 +2902,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_mu(
@@ -2911,7 +2911,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_mu(
@@ -2920,7 +2920,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_mu(
@@ -2929,7 +2929,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_mu(
@@ -2938,7 +2938,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_mu(
@@ -2947,7 +2947,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_mu(
@@ -2956,7 +2956,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_mu(
@@ -2965,7 +2965,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_mu(
@@ -2974,7 +2974,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_mu(
@@ -2983,7 +2983,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_mu(
@@ -2992,7 +2992,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_mu(
@@ -3001,7 +3001,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_mu(
@@ -3010,7 +3010,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_mu(
@@ -3019,7 +3019,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_mu(
@@ -3028,7 +3028,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_mu(
@@ -3037,7 +3037,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_mu(
@@ -3046,7 +3046,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_mu(
@@ -3055,7 +3055,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_mu(
@@ -3064,7 +3064,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_mu(
@@ -3073,7 +3073,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_mu(
@@ -3082,7 +3082,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_mu(
@@ -3091,7 +3091,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_mu(
@@ -3100,7 +3100,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_mu(
@@ -3109,7 +3109,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_mu(
@@ -3118,7 +3118,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_mu(
@@ -3127,7 +3127,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_mu(
@@ -3136,7 +3136,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_mu(
@@ -3145,7 +3145,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_mu(
@@ -3154,7 +3154,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_mu(
@@ -3163,7 +3163,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_mu(
@@ -3172,6 +3172,6 @@ vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsac_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsub.c
index 1b6d6ae76c58..c4a74b9dcd20 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsub.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_tu(
@@ -22,7 +22,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_tu(
@@ -31,7 +31,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_tu(
@@ -40,7 +40,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_tu(
@@ -49,7 +49,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_tu(
@@ -58,7 +58,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_tu(
@@ -67,7 +67,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_tu(
@@ -76,7 +76,7 @@ vint8m1_t test_vnmsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_tu(
@@ -85,7 +85,7 @@ vint8m1_t test_vnmsub_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_tu(
@@ -94,7 +94,7 @@ vint8m2_t test_vnmsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_tu(
@@ -103,7 +103,7 @@ vint8m2_t test_vnmsub_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_tu(
@@ -112,7 +112,7 @@ vint8m4_t test_vnmsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_tu(
@@ -121,7 +121,7 @@ vint8m4_t test_vnmsub_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_tu(
@@ -130,7 +130,7 @@ vint8m8_t test_vnmsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_tu(
@@ -139,7 +139,7 @@ vint8m8_t test_vnmsub_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_tu(
@@ -148,7 +148,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_tu(
@@ -157,7 +157,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_tu(
@@ -166,7 +166,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_tu(
@@ -175,7 +175,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_tu(
@@ -184,7 +184,7 @@ vint16m1_t test_vnmsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_tu(
@@ -193,7 +193,7 @@ vint16m1_t test_vnmsub_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_tu(
@@ -202,7 +202,7 @@ vint16m2_t test_vnmsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_tu(
@@ -211,7 +211,7 @@ vint16m2_t test_vnmsub_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_tu(
@@ -220,7 +220,7 @@ vint16m4_t test_vnmsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_tu(
@@ -229,7 +229,7 @@ vint16m4_t test_vnmsub_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_tu(
@@ -238,7 +238,7 @@ vint16m8_t test_vnmsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tu(
@@ -247,7 +247,7 @@ vint16m8_t test_vnmsub_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tu(
@@ -256,7 +256,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_tu(
@@ -265,7 +265,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_tu(
@@ -274,7 +274,7 @@ vint32m1_t test_vnmsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vnmsub_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vnmsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_tu(
@@ -301,7 +301,7 @@ vint32m2_t test_vnmsub_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_tu(
@@ -310,7 +310,7 @@ vint32m4_t test_vnmsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_tu(
@@ -319,7 +319,7 @@ vint32m4_t test_vnmsub_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_tu(
@@ -328,7 +328,7 @@ vint32m8_t test_vnmsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_tu(
@@ -337,7 +337,7 @@ vint32m8_t test_vnmsub_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_tu(
@@ -346,7 +346,7 @@ vint64m1_t test_vnmsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_tu(
@@ -355,7 +355,7 @@ vint64m1_t test_vnmsub_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_tu(
@@ -364,7 +364,7 @@ vint64m2_t test_vnmsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_tu(
@@ -373,7 +373,7 @@ vint64m2_t test_vnmsub_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_tu(
@@ -382,7 +382,7 @@ vint64m4_t test_vnmsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_tu(
@@ -391,7 +391,7 @@ vint64m4_t test_vnmsub_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_tu(
@@ -400,7 +400,7 @@ vint64m8_t test_vnmsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_tu(
@@ -409,7 +409,7 @@ vint64m8_t test_vnmsub_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_tu(
@@ -418,7 +418,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_tu(
@@ -427,7 +427,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_tu(
@@ -436,7 +436,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_tu(
@@ -445,7 +445,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_tu(
@@ -454,7 +454,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_tu(
@@ -463,7 +463,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_tu(
@@ -472,7 +472,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_tu(
@@ -481,7 +481,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_tu(
@@ -490,7 +490,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_tu(
@@ -499,7 +499,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_tu(
@@ -508,7 +508,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_tu(
@@ -517,7 +517,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_tu(
@@ -526,7 +526,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_tu(
@@ -535,7 +535,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_tu(
@@ -544,7 +544,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_tu(
@@ -553,7 +553,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_tu(
@@ -562,7 +562,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_tu(
@@ -571,7 +571,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_tu(
@@ -580,7 +580,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_tu(
@@ -589,7 +589,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_tu(
@@ -598,7 +598,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_tu(
@@ -607,7 +607,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_tu(
@@ -616,7 +616,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_tu(
@@ -625,7 +625,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_tu(
@@ -634,7 +634,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tu(
@@ -643,7 +643,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tu(
@@ -652,7 +652,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_tu(
@@ -661,7 +661,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_tu(
@@ -670,7 +670,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_tu(
@@ -679,7 +679,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_tu(
@@ -688,7 +688,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_tu(
@@ -697,7 +697,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_tu(
@@ -706,7 +706,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_tu(
@@ -715,7 +715,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_tu(
@@ -724,7 +724,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_tu(
@@ -733,7 +733,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_tu(
@@ -742,7 +742,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_tu(
@@ -751,7 +751,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_tu(
@@ -760,7 +760,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_tu(
@@ -769,7 +769,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_tu(
@@ -778,7 +778,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_tu(
@@ -787,7 +787,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_tu(
@@ -796,7 +796,7 @@ vuint64m8_t test_vnmsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub_tu(vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_tum(
@@ -805,7 +805,7 @@ vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_tum(
@@ -814,7 +814,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_tum(
@@ -823,7 +823,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_tum(
@@ -832,7 +832,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_tum(
@@ -841,7 +841,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_tum(
@@ -850,7 +850,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_tum(
@@ -859,7 +859,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_tum(
@@ -868,7 +868,7 @@ vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_tum(
@@ -877,7 +877,7 @@ vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_tum(
@@ -886,7 +886,7 @@ vint8m2_t test_vnmsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_tum(
@@ -895,7 +895,7 @@ vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_tum(
@@ -904,7 +904,7 @@ vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_tum(
@@ -913,7 +913,7 @@ vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_tum(
@@ -922,7 +922,7 @@ vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vi
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_tum(
@@ -931,7 +931,7 @@ vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_tum(
@@ -940,7 +940,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_tum(
@@ -949,7 +949,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_tum(
@@ -958,7 +958,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_tum(
@@ -967,7 +967,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_tum(
@@ -976,7 +976,7 @@ vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_tum(
@@ -985,7 +985,7 @@ vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_tum(
@@ -994,7 +994,7 @@ vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_tum(
@@ -1003,7 +1003,7 @@ vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_tum(
@@ -1012,7 +1012,7 @@ vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_tum(
@@ -1021,7 +1021,7 @@ vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_tum(
@@ -1030,7 +1030,7 @@ vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tum(
@@ -1039,7 +1039,7 @@ vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tum(
@@ -1048,7 +1048,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_tum(
@@ -1057,7 +1057,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_tum(
@@ -1066,7 +1066,7 @@ vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_tum(
@@ -1075,7 +1075,7 @@ vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_tum(
@@ -1084,7 +1084,7 @@ vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_tum(
@@ -1093,7 +1093,7 @@ vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_tum(
@@ -1102,7 +1102,7 @@ vint32m4_t test_vnmsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_tum(
@@ -1111,7 +1111,7 @@ vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_tum(
@@ -1120,7 +1120,7 @@ vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_tum(
@@ -1129,7 +1129,7 @@ vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_tum(
@@ -1138,7 +1138,7 @@ vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_tum(
@@ -1147,7 +1147,7 @@ vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_tum(
@@ -1156,7 +1156,7 @@ vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_tum(
@@ -1165,7 +1165,7 @@ vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_tum(
@@ -1174,7 +1174,7 @@ vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_tum(
@@ -1183,7 +1183,7 @@ vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_tum(
@@ -1192,7 +1192,7 @@ vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_tum(
@@ -1201,7 +1201,7 @@ vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_tum(
@@ -1210,7 +1210,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_tum(
@@ -1219,7 +1219,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_tum(
@@ -1228,7 +1228,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_tum(
@@ -1237,7 +1237,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_tum(
@@ -1246,7 +1246,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_tum(
@@ -1255,7 +1255,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_tum(
@@ -1264,7 +1264,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_tum(
@@ -1273,7 +1273,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_tum(
@@ -1282,7 +1282,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_tum(
@@ -1291,7 +1291,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_tum(
@@ -1300,7 +1300,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_tum(
@@ -1309,7 +1309,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_tum(
@@ -1318,7 +1318,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_tum(
@@ -1327,7 +1327,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_tum(
@@ -1336,7 +1336,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_tum(
@@ -1345,7 +1345,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_tum(
@@ -1354,7 +1354,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_tum(
@@ -1363,7 +1363,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_tum(
@@ -1372,7 +1372,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_tum(
@@ -1381,7 +1381,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_tum(
@@ -1390,7 +1390,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_tum(
@@ -1399,7 +1399,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_tum(
@@ -1408,7 +1408,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_tum(
@@ -1417,7 +1417,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_tum(
@@ -1426,7 +1426,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tum(
@@ -1435,7 +1435,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tum(
@@ -1444,7 +1444,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_tum(
@@ -1453,7 +1453,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_tum(
@@ -1462,7 +1462,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_tum(
@@ -1471,7 +1471,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_tum(
@@ -1480,7 +1480,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_tum(
@@ -1489,7 +1489,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_tum(
@@ -1498,7 +1498,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_tum(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_tum(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_tum(
@@ -1525,7 +1525,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_tum(
@@ -1534,7 +1534,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_tum(
@@ -1543,7 +1543,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_tum(
@@ -1552,7 +1552,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_tum(
@@ -1561,7 +1561,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_tum(
@@ -1570,7 +1570,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_tum(
@@ -1579,7 +1579,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_tum(
@@ -1588,7 +1588,7 @@ vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
}
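// A minimal hand-written sketch (not part of the autogenerated tests above;
// assumes <riscv_vector.h> is already included): the _tum tests ending here
// use the tail-undisturbed, mask-agnostic policy, while the _tumu tests that
// follow also keep masked-off body elements undisturbed from vd. The overload
// below is the same __riscv_vnmsub_tumu exercised by the tests.
static inline vint8m1_t sketch_vnmsub_tumu(vbool8_t mask, vint8m1_t vd,
                                           vint8m1_t vs1, vint8m1_t vs2,
                                           size_t vl) {
  // Active elements compute vd[i] = -(vd[i] * vs1[i]) + vs2[i]; wherever
  // mask[i] == 0, and for tail elements past vl, vd[i] is left undisturbed.
  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}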
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_tumu(
@@ -1597,7 +1597,7 @@ vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_tumu(
@@ -1606,7 +1606,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_tumu(
@@ -1615,7 +1615,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_tumu(
@@ -1624,7 +1624,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_tumu(
@@ -1633,7 +1633,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_tumu(
@@ -1642,7 +1642,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_tumu(
@@ -1651,7 +1651,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_tumu(
@@ -1660,7 +1660,7 @@ vint8m1_t test_vnmsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_tumu(
@@ -1669,7 +1669,7 @@ vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_tumu(
@@ -1678,7 +1678,7 @@ vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_tumu(
@@ -1687,7 +1687,7 @@ vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_tumu(
@@ -1696,7 +1696,7 @@ vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_tumu(
@@ -1705,7 +1705,7 @@ vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_tumu(
@@ -1714,7 +1714,7 @@ vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_tumu(
@@ -1723,7 +1723,7 @@ vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_tumu(
@@ -1732,7 +1732,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_tumu(
@@ -1741,7 +1741,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_tumu(
@@ -1750,7 +1750,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_tumu(
@@ -1759,7 +1759,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_tumu(
@@ -1768,7 +1768,7 @@ vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_tumu(
@@ -1777,7 +1777,7 @@ vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_tumu(
@@ -1786,7 +1786,7 @@ vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_tumu(
@@ -1795,7 +1795,7 @@ vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_tumu(
@@ -1804,7 +1804,7 @@ vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_tumu(
@@ -1813,7 +1813,7 @@ vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_tumu(
@@ -1822,7 +1822,7 @@ vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tumu(
@@ -1831,7 +1831,7 @@ vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tumu(
@@ -1840,7 +1840,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_tumu(
@@ -1849,7 +1849,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_tumu(
@@ -1858,7 +1858,7 @@ vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_tumu(
@@ -1867,7 +1867,7 @@ vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_tumu(
@@ -1876,7 +1876,7 @@ vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_tumu(
@@ -1885,7 +1885,7 @@ vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_tumu(
@@ -1894,7 +1894,7 @@ vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_tumu(
@@ -1903,7 +1903,7 @@ vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_tumu(
@@ -1912,7 +1912,7 @@ vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_tumu(
@@ -1921,7 +1921,7 @@ vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_tumu(
@@ -1930,7 +1930,7 @@ vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_tumu(
@@ -1939,7 +1939,7 @@ vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_tumu(
@@ -1948,7 +1948,7 @@ vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_tumu(
@@ -1957,7 +1957,7 @@ vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_tumu(
@@ -1966,7 +1966,7 @@ vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_tumu(
@@ -1975,7 +1975,7 @@ vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_tumu(
@@ -1984,7 +1984,7 @@ vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_tumu(
@@ -1993,7 +1993,7 @@ vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_tumu(
@@ -2002,7 +2002,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_tumu(
@@ -2011,7 +2011,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_tumu(
@@ -2020,7 +2020,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_tumu(
@@ -2029,7 +2029,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_tumu(
@@ -2038,7 +2038,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_tumu(
@@ -2047,7 +2047,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_tumu(
@@ -2056,7 +2056,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_tumu(
@@ -2065,7 +2065,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_tumu(
@@ -2074,7 +2074,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_tumu(
@@ -2083,7 +2083,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_tumu(
@@ -2092,7 +2092,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_tumu(
@@ -2101,7 +2101,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_tumu(
@@ -2110,7 +2110,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_tumu(
@@ -2119,7 +2119,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_tumu(
@@ -2128,7 +2128,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_tumu(
@@ -2137,7 +2137,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_tumu(
@@ -2146,7 +2146,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_tumu(
@@ -2155,7 +2155,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_tumu(
@@ -2164,7 +2164,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_tumu(
@@ -2173,7 +2173,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_tumu(
@@ -2182,7 +2182,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_tumu(
@@ -2191,7 +2191,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_tumu(
@@ -2200,7 +2200,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_tumu(
@@ -2209,7 +2209,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_tumu(
@@ -2218,7 +2218,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tumu(
@@ -2227,7 +2227,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tumu(
@@ -2236,7 +2236,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_tumu(
@@ -2245,7 +2245,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_tumu(
@@ -2254,7 +2254,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_tumu(
@@ -2263,7 +2263,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_tumu(
@@ -2272,7 +2272,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_tumu(
@@ -2281,7 +2281,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_tumu(
@@ -2290,7 +2290,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_tumu(
@@ -2299,7 +2299,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_tumu(
@@ -2308,7 +2308,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_tumu(
@@ -2317,7 +2317,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_tumu(
@@ -2326,7 +2326,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_tumu(
@@ -2335,7 +2335,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_tumu(
@@ -2344,7 +2344,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_tumu(
@@ -2353,7 +2353,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_tumu(
@@ -2362,7 +2362,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_tumu(
@@ -2371,7 +2371,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_tumu(
@@ -2380,7 +2380,7 @@ vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
}
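// A minimal hand-written sketch (not part of the autogenerated tests; assumes
// <riscv_vector.h> is already included): the _mu policy used by the tests
// below keeps masked-off body elements undisturbed from vd but, unlike the
// _tumu tests above, leaves the tail agnostic.
static inline vuint32m1_t sketch_vnmsub_mu(vbool32_t mask, vuint32m1_t vd,
                                           uint32_t rs1, vuint32m1_t vs2,
                                           size_t vl) {
  // Wherever mask[i] == 0, vd[i] passes through unchanged; tail elements
  // past vl may hold any value (tail agnostic).
  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}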
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_mu(
@@ -2389,7 +2389,7 @@ vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_mu(
@@ -2398,7 +2398,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_mu(
@@ -2407,7 +2407,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_mu(
@@ -2416,7 +2416,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_mu(
@@ -2425,7 +2425,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_mu(
@@ -2434,7 +2434,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_mu(
@@ -2443,7 +2443,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_mu(
@@ -2452,7 +2452,7 @@ vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_mu(
@@ -2461,7 +2461,7 @@ vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_mu(
@@ -2470,7 +2470,7 @@ vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_mu(
@@ -2479,7 +2479,7 @@ vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_mu(
@@ -2488,7 +2488,7 @@ vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_mu(
@@ -2497,7 +2497,7 @@ vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_mu(
@@ -2506,7 +2506,7 @@ vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_mu(
@@ -2515,7 +2515,7 @@ vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_mu(
@@ -2524,7 +2524,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_mu(
@@ -2533,7 +2533,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_mu(
@@ -2542,7 +2542,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_mu(
@@ -2551,7 +2551,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_mu(
@@ -2560,7 +2560,7 @@ vint16m1_t test_vnmsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_mu(
@@ -2569,7 +2569,7 @@ vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_mu(
@@ -2578,7 +2578,7 @@ vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_mu(
@@ -2587,7 +2587,7 @@ vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_mu(
@@ -2596,7 +2596,7 @@ vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_mu(
@@ -2605,7 +2605,7 @@ vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_mu(
@@ -2614,7 +2614,7 @@ vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_mu(
@@ -2623,7 +2623,7 @@ vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_mu(
@@ -2632,7 +2632,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_mu(
@@ -2641,7 +2641,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_mu(
@@ -2650,7 +2650,7 @@ vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_mu(
@@ -2659,7 +2659,7 @@ vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_mu(
@@ -2668,7 +2668,7 @@ vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_mu(
@@ -2677,7 +2677,7 @@ vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_mu(
@@ -2686,7 +2686,7 @@ vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_mu(
@@ -2695,7 +2695,7 @@ vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_mu(
@@ -2704,7 +2704,7 @@ vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_mu(
@@ -2713,7 +2713,7 @@ vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_mu(
@@ -2722,7 +2722,7 @@ vint64m1_t test_vnmsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_mu(
@@ -2731,7 +2731,7 @@ vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_mu(
@@ -2740,7 +2740,7 @@ vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_mu(
@@ -2749,7 +2749,7 @@ vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_mu(
@@ -2758,7 +2758,7 @@ vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_mu(
@@ -2767,7 +2767,7 @@ vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_mu(
@@ -2776,7 +2776,7 @@ vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_mu(
@@ -2785,7 +2785,7 @@ vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_mu(
@@ -2794,7 +2794,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_mu(
@@ -2803,7 +2803,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_mu(
@@ -2812,7 +2812,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_mu(
@@ -2821,7 +2821,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_mu(
@@ -2830,7 +2830,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_mu(
@@ -2839,7 +2839,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_mu(
@@ -2848,7 +2848,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_mu(
@@ -2857,7 +2857,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_mu(
@@ -2866,7 +2866,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_mu(
@@ -2875,7 +2875,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_mu(
@@ -2884,7 +2884,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_mu(
@@ -2893,7 +2893,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_mu(
@@ -2902,7 +2902,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_mu(
@@ -2911,7 +2911,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_mu(
@@ -2920,7 +2920,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_mu(
@@ -2929,7 +2929,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_mu(
@@ -2938,7 +2938,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_mu(
@@ -2947,7 +2947,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_mu(
@@ -2956,7 +2956,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_mu(
@@ -2965,7 +2965,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_mu(
@@ -2974,7 +2974,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_mu(
@@ -2983,7 +2983,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_mu(
@@ -2992,7 +2992,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_mu(
@@ -3001,7 +3001,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_mu(
@@ -3010,7 +3010,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_mu(
@@ -3019,7 +3019,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_mu(
@@ -3028,7 +3028,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_mu(
@@ -3037,7 +3037,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_mu(
@@ -3046,7 +3046,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_mu(
@@ -3055,7 +3055,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_mu(
@@ -3064,7 +3064,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_mu(
@@ -3073,7 +3073,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_mu(
@@ -3082,7 +3082,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_mu(
@@ -3091,7 +3091,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_mu(
@@ -3100,7 +3100,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_mu(
@@ -3109,7 +3109,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_mu(
@@ -3118,7 +3118,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_mu(
@@ -3127,7 +3127,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_mu(
@@ -3136,7 +3136,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_mu(
@@ -3145,7 +3145,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_mu(
@@ -3154,7 +3154,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_mu(
@@ -3163,7 +3163,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_mu(
@@ -3172,6 +3172,6 @@ vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
- return vnmsub_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnot.c
index bf50cd0adc4e..646e4955cf40 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnot.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vnot_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2_tu(
@@ -30,7 +30,7 @@ vint8mf4_t test_vnot_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m1_tu(
@@ -39,7 +39,7 @@ vint8mf2_t test_vnot_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m2_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vnot_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m4_tu(
@@ -57,7 +57,7 @@ vint8m2_t test_vnot_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m8_tu(
@@ -66,7 +66,7 @@ vint8m4_t test_vnot_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnot_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4_tu(
@@ -75,7 +75,7 @@ vint8m8_t test_vnot_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnot_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2_tu(
@@ -84,7 +84,7 @@ vint16mf4_t test_vnot_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnot_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m1_tu(
@@ -93,7 +93,7 @@ vint16mf2_t test_vnot_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnot_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m2_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vnot_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnot_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m4_tu(
@@ -111,7 +111,7 @@ vint16m2_t test_vnot_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnot_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m8_tu(
@@ -120,7 +120,7 @@ vint16m4_t test_vnot_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnot_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tu(
@@ -129,7 +129,7 @@ vint16m8_t test_vnot_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m1_tu(
@@ -138,7 +138,7 @@ vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnot_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m2_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vnot_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnot_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m4_tu(
@@ -156,7 +156,7 @@ vint32m2_t test_vnot_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnot_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m8_tu(
@@ -165,7 +165,7 @@ vint32m4_t test_vnot_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnot_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m8_t test_vnot_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnot_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m2_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vnot_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnot_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m4_tu(
@@ -192,7 +192,7 @@ vint64m2_t test_vnot_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnot_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m8_tu(
@@ -201,7 +201,7 @@ vint64m4_t test_vnot_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnot_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8_tu(
@@ -210,7 +210,7 @@ vint64m8_t test_vnot_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnot_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4_tu(
@@ -219,7 +219,7 @@ vuint8mf8_t test_vnot_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnot_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2_tu(
@@ -228,7 +228,7 @@ vuint8mf4_t test_vnot_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnot_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m1_tu(
@@ -237,7 +237,7 @@ vuint8mf2_t test_vnot_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnot_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m2_tu(
@@ -246,7 +246,7 @@ vuint8m1_t test_vnot_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnot_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m4_tu(
@@ -255,7 +255,7 @@ vuint8m2_t test_vnot_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnot_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m8_tu(
@@ -264,7 +264,7 @@ vuint8m4_t test_vnot_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnot_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4_tu(
@@ -273,7 +273,7 @@ vuint8m8_t test_vnot_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2_tu(
@@ -282,7 +282,7 @@ vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m1_tu(
@@ -291,7 +291,7 @@ vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnot_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m2_tu(
@@ -300,7 +300,7 @@ vuint16m1_t test_vnot_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnot_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m4_tu(
@@ -309,7 +309,7 @@ vuint16m2_t test_vnot_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnot_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m8_tu(
@@ -318,7 +318,7 @@ vuint16m4_t test_vnot_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnot_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tu(
@@ -327,7 +327,7 @@ vuint16m8_t test_vnot_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m1_tu(
@@ -336,7 +336,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnot_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m2_tu(
@@ -345,7 +345,7 @@ vuint32m1_t test_vnot_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnot_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m4_tu(
@@ -354,7 +354,7 @@ vuint32m2_t test_vnot_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnot_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m8_tu(
@@ -363,7 +363,7 @@ vuint32m4_t test_vnot_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnot_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m1_tu(
@@ -372,7 +372,7 @@ vuint32m8_t test_vnot_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m2_tu(
@@ -381,7 +381,7 @@ vuint64m1_t test_vnot_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m4_tu(
@@ -390,7 +390,7 @@ vuint64m2_t test_vnot_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m4_t test_vnot_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) {
- return vnot_tu(maskedoff, op1, vl);
+ return __riscv_vnot_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vnot_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2_tum(
@@ -426,7 +426,7 @@ vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m1_tum(
@@ -435,7 +435,7 @@ vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m2_tum(
@@ -444,7 +444,7 @@ vint8m1_t test_vnot_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m4_tum(
@@ -453,7 +453,7 @@ vint8m2_t test_vnot_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m8_tum(
@@ -462,7 +462,7 @@ vint8m4_t test_vnot_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnot_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4_tum(
@@ -471,7 +471,7 @@ vint8m8_t test_vnot_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2_tum(
@@ -480,7 +480,7 @@ vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m1_tum(
@@ -489,7 +489,7 @@ vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnot_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m2_tum(
@@ -498,7 +498,7 @@ vint16m1_t test_vnot_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnot_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m4_tum(
@@ -507,7 +507,7 @@ vint16m2_t test_vnot_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnot_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m8_tum(
@@ -516,7 +516,7 @@ vint16m4_t test_vnot_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnot_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tum(
@@ -525,7 +525,7 @@ vint16m8_t test_vnot_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m1_tum(
@@ -534,7 +534,7 @@ vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnot_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m2_tum(
@@ -543,7 +543,7 @@ vint32m1_t test_vnot_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnot_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m4_tum(
@@ -552,7 +552,7 @@ vint32m2_t test_vnot_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnot_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m8_tum(
@@ -561,7 +561,7 @@ vint32m4_t test_vnot_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnot_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m1_tum(
@@ -570,7 +570,7 @@ vint32m8_t test_vnot_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnot_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m2_tum(
@@ -579,7 +579,7 @@ vint64m1_t test_vnot_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnot_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m4_tum(
@@ -588,7 +588,7 @@ vint64m2_t test_vnot_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnot_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m8_tum(
@@ -597,7 +597,7 @@ vint64m4_t test_vnot_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnot_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8_tum(
@@ -606,7 +606,7 @@ vint64m8_t test_vnot_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4_tum(
@@ -615,7 +615,7 @@ vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2_tum(
@@ -624,7 +624,7 @@ vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m1_tum(
@@ -633,7 +633,7 @@ vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m2_tum(
@@ -642,7 +642,7 @@ vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m4_tum(
@@ -651,7 +651,7 @@ vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m8_tum(
@@ -660,7 +660,7 @@ vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4_tum(
@@ -669,7 +669,7 @@ vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2_tum(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m1_tum(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m2_tum(
@@ -696,7 +696,7 @@ vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m4_tum(
@@ -705,7 +705,7 @@ vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m8_tum(
@@ -714,7 +714,7 @@ vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tum(
@@ -723,7 +723,7 @@ vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m1_tum(
@@ -732,7 +732,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m2_tum(
@@ -741,7 +741,7 @@ vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m4_tum(
@@ -750,7 +750,7 @@ vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m8_tum(
@@ -759,7 +759,7 @@ vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m1_tum(
@@ -768,7 +768,7 @@ vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m2_tum(
@@ -777,7 +777,7 @@ vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m4_tum(
@@ -786,7 +786,7 @@ vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) {
- return vnot_tum(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2_tumu(
@@ -822,7 +822,7 @@ vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m1_tumu(
@@ -831,7 +831,7 @@ vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m2_tumu(
@@ -840,7 +840,7 @@ vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m4_tumu(
@@ -849,7 +849,7 @@ vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m8_tumu(
@@ -858,7 +858,7 @@ vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4_tumu(
@@ -867,7 +867,7 @@ vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2_tumu(
@@ -876,7 +876,7 @@ vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnot_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m1_tumu(
@@ -885,7 +885,7 @@ vint16mf2_t test_vnot_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m2_tumu(
@@ -894,7 +894,7 @@ vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m4_tumu(
@@ -903,7 +903,7 @@ vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m8_tumu(
@@ -912,7 +912,7 @@ vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tumu(
@@ -921,7 +921,7 @@ vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m1_tumu(
@@ -930,7 +930,7 @@ vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m2_tumu(
@@ -939,7 +939,7 @@ vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m4_tumu(
@@ -948,7 +948,7 @@ vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m8_tumu(
@@ -957,7 +957,7 @@ vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m1_tumu(
@@ -966,7 +966,7 @@ vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m2_tumu(
@@ -975,7 +975,7 @@ vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m4_tumu(
@@ -984,7 +984,7 @@ vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m8_tumu(
@@ -993,7 +993,7 @@ vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8_tumu(
@@ -1002,7 +1002,7 @@ vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4_tumu(
@@ -1011,7 +1011,7 @@ vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2_tumu(
@@ -1020,7 +1020,7 @@ vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m1_tumu(
@@ -1029,7 +1029,7 @@ vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnot_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m2_tumu(
@@ -1038,7 +1038,7 @@ vuint8m1_t test_vnot_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m4_tumu(
@@ -1047,7 +1047,7 @@ vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m8_tumu(
@@ -1056,7 +1056,7 @@ vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4_tumu(
@@ -1065,7 +1065,7 @@ vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2_tumu(
@@ -1074,7 +1074,7 @@ vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m1_tumu(
@@ -1083,7 +1083,7 @@ vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m2_tumu(
@@ -1092,7 +1092,7 @@ vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnot_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m4_tumu(
@@ -1101,7 +1101,7 @@ vuint16m2_t test_vnot_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m8_tumu(
@@ -1110,7 +1110,7 @@ vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tumu(
@@ -1119,7 +1119,7 @@ vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m2_tumu(
@@ -1137,7 +1137,7 @@ vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m4_tumu(
@@ -1146,7 +1146,7 @@ vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m8_tumu(
@@ -1155,7 +1155,7 @@ vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m1_tumu(
@@ -1164,7 +1164,7 @@ vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m2_tumu(
@@ -1173,7 +1173,7 @@ vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m4_tumu(
@@ -1182,7 +1182,7 @@ vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m4_t test_vnot_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) {
- return vnot_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2_mu(
@@ -1218,7 +1218,7 @@ vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m1_mu(
@@ -1227,7 +1227,7 @@ vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m2_mu(
@@ -1236,7 +1236,7 @@ vint8m1_t test_vnot_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m4_mu(
@@ -1245,7 +1245,7 @@ vint8m2_t test_vnot_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i8m8_mu(
@@ -1254,7 +1254,7 @@ vint8m4_t test_vnot_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnot_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4_mu(
@@ -1263,7 +1263,7 @@ vint8m8_t test_vnot_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2_mu(
@@ -1272,7 +1272,7 @@ vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m1_mu(
@@ -1281,7 +1281,7 @@ vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnot_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m2_mu(
@@ -1290,7 +1290,7 @@ vint16m1_t test_vnot_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnot_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m4_mu(
@@ -1299,7 +1299,7 @@ vint16m2_t test_vnot_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnot_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i16m8_mu(
@@ -1308,7 +1308,7 @@ vint16m4_t test_vnot_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnot_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_mu(
@@ -1317,7 +1317,7 @@ vint16m8_t test_vnot_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m1_mu(
@@ -1326,7 +1326,7 @@ vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnot_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m2_mu(
@@ -1335,7 +1335,7 @@ vint32m1_t test_vnot_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnot_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m4_mu(
@@ -1344,7 +1344,7 @@ vint32m2_t test_vnot_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnot_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i32m8_mu(
@@ -1353,7 +1353,7 @@ vint32m4_t test_vnot_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnot_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m1_mu(
@@ -1362,7 +1362,7 @@ vint32m8_t test_vnot_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnot_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m2_mu(
@@ -1371,7 +1371,7 @@ vint64m1_t test_vnot_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnot_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m4_mu(
@@ -1380,7 +1380,7 @@ vint64m2_t test_vnot_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnot_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_i64m8_mu(
@@ -1389,7 +1389,7 @@ vint64m4_t test_vnot_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnot_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8_mu(
@@ -1398,7 +1398,7 @@ vint64m8_t test_vnot_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4_mu(
@@ -1407,7 +1407,7 @@ vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnot_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2_mu(
@@ -1416,7 +1416,7 @@ vuint8mf4_t test_vnot_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m1_mu(
@@ -1425,7 +1425,7 @@ vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m2_mu(
@@ -1434,7 +1434,7 @@ vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m4_mu(
@@ -1443,7 +1443,7 @@ vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u8m8_mu(
@@ -1452,7 +1452,7 @@ vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4_mu(
@@ -1461,7 +1461,7 @@ vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2_mu(
@@ -1470,7 +1470,7 @@ vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m1_mu(
@@ -1479,7 +1479,7 @@ vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m2_mu(
@@ -1488,7 +1488,7 @@ vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m4_mu(
@@ -1497,7 +1497,7 @@ vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnot_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u16m8_mu(
@@ -1506,7 +1506,7 @@ vuint16m4_t test_vnot_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_mu(
@@ -1515,7 +1515,7 @@ vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m1_mu(
@@ -1524,7 +1524,7 @@ vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnot_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m2_mu(
@@ -1533,7 +1533,7 @@ vuint32m1_t test_vnot_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m4_mu(
@@ -1542,7 +1542,7 @@ vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnot_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u32m8_mu(
@@ -1551,7 +1551,7 @@ vuint32m4_t test_vnot_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m1_mu(
@@ -1560,7 +1560,7 @@ vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m2_mu(
@@ -1569,7 +1569,7 @@ vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m4_mu(
@@ -1578,7 +1578,7 @@ vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vnot_v_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m4_t test_vnot_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) {
- return vnot_mu(mask, maskedoff, op1, vl);
+ return __riscv_vnot_mu(mask, maskedoff, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsra.c
index 0a857e795acb..bf1db6ff36e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsra.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shif
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_tu(
@@ -129,7 +129,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_tu(
@@ -138,7 +138,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_tu(
@@ -147,7 +147,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_tu(
@@ -156,7 +156,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_tu(
@@ -165,7 +165,7 @@ vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_tu(
@@ -174,7 +174,7 @@ vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_tu(
@@ -183,7 +183,7 @@ vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_tu(
@@ -192,7 +192,7 @@ vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_tu(
@@ -201,7 +201,7 @@ vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tu(
@@ -210,7 +210,7 @@ vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tu(
@@ -219,7 +219,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_tu(
@@ -228,7 +228,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_tu(
@@ -237,7 +237,7 @@ vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_tu(
@@ -246,7 +246,7 @@ vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_tu(
@@ -255,7 +255,7 @@ vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_tu(
@@ -264,7 +264,7 @@ vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_tu(
@@ -273,7 +273,7 @@ vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vnsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_tum(
@@ -282,7 +282,7 @@ vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_tum(
@@ -291,7 +291,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_tum(
@@ -300,7 +300,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_tum(
@@ -309,7 +309,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_tum(
@@ -318,7 +318,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_tum(
@@ -327,7 +327,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_tum(
@@ -336,7 +336,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_tum(
@@ -345,7 +345,7 @@ vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_tum(
@@ -354,7 +354,7 @@ vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_tum(
@@ -363,7 +363,7 @@ vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_tum(
@@ -372,7 +372,7 @@ vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_tum(
@@ -381,7 +381,7 @@ vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_tum(
@@ -390,7 +390,7 @@ vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_tum(
@@ -399,7 +399,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_tum(
@@ -408,7 +408,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_tum(
@@ -417,7 +417,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_tum(
@@ -426,7 +426,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_tum(
@@ -435,7 +435,7 @@ vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_tum(
@@ -444,7 +444,7 @@ vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_tum(
@@ -453,7 +453,7 @@ vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_tum(
@@ -462,7 +462,7 @@ vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_tum(
@@ -471,7 +471,7 @@ vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tum(
@@ -480,7 +480,7 @@ vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tum(
@@ -489,7 +489,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_tum(
@@ -498,7 +498,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_tum(
@@ -507,7 +507,7 @@ vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_tum(
@@ -516,7 +516,7 @@ vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_tum(
@@ -525,7 +525,7 @@ vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_tum(
@@ -534,7 +534,7 @@ vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_tum(
@@ -543,7 +543,7 @@ vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vnsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_tumu(
@@ -552,7 +552,7 @@ vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_tumu(
@@ -561,7 +561,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_tumu(
@@ -570,7 +570,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_tumu(
@@ -579,7 +579,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_tumu(
@@ -588,7 +588,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_tumu(
@@ -597,7 +597,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_tumu(
@@ -606,7 +606,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_tumu(
@@ -615,7 +615,7 @@ vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_tumu(
@@ -624,7 +624,7 @@ vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_tumu(
@@ -633,7 +633,7 @@ vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_tumu(
@@ -642,7 +642,7 @@ vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_tumu(
@@ -651,7 +651,7 @@ vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_tumu(
@@ -660,7 +660,7 @@ vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_tumu(
@@ -669,7 +669,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_tumu(
@@ -678,7 +678,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_tumu(
@@ -687,7 +687,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_tumu(
@@ -696,7 +696,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_tumu(
@@ -705,7 +705,7 @@ vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_tumu(
@@ -714,7 +714,7 @@ vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_tumu(
@@ -723,7 +723,7 @@ vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_tumu(
@@ -732,7 +732,7 @@ vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_tumu(
@@ -741,7 +741,7 @@ vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tumu(
@@ -750,7 +750,7 @@ vint16m4_t test_vnsra_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tumu(
@@ -759,7 +759,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_tumu(
@@ -768,7 +768,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_tumu(
@@ -777,7 +777,7 @@ vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_tumu(
@@ -786,7 +786,7 @@ vint32m1_t test_vnsra_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_tumu(
@@ -795,7 +795,7 @@ vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_tumu(
@@ -804,7 +804,7 @@ vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_tumu(
@@ -813,7 +813,7 @@ vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vnsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_mu(
@@ -822,7 +822,7 @@ vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_mu(
@@ -831,7 +831,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_mu(
@@ -840,7 +840,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_mu(
@@ -849,7 +849,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_mu(
@@ -858,7 +858,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_mu(
@@ -867,7 +867,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_mu(
@@ -876,7 +876,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_mu(
@@ -885,7 +885,7 @@ vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_mu(
@@ -894,7 +894,7 @@ vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_mu(
@@ -903,7 +903,7 @@ vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_mu(
@@ -912,7 +912,7 @@ vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_mu(
@@ -921,7 +921,7 @@ vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_mu(
@@ -930,7 +930,7 @@ vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_mu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_mu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_mu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_mu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_mu(
@@ -975,7 +975,7 @@ vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_mu(
@@ -984,7 +984,7 @@ vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_mu(
@@ -993,7 +993,7 @@ vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_mu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_mu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_mu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_mu(
@@ -1029,7 +1029,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_mu(
@@ -1038,7 +1038,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_mu(
@@ -1047,7 +1047,7 @@ vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_mu(
@@ -1056,7 +1056,7 @@ vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_mu(
@@ -1065,7 +1065,7 @@ vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_mu(
@@ -1074,7 +1074,7 @@ vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_mu(
@@ -1083,6 +1083,6 @@ vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vnsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsrl.c
index bdc432e31007..0285b81073c1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnsrl.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_tu(
@@ -129,7 +129,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_tu(
@@ -138,7 +138,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_tu(
@@ -147,7 +147,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_tu(
@@ -156,7 +156,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_tu(
@@ -165,7 +165,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_tu(
@@ -174,7 +174,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_tu(
@@ -183,7 +183,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_tu(
@@ -192,7 +192,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_tu(
@@ -201,7 +201,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tu(
@@ -210,7 +210,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tu(
@@ -219,7 +219,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_tu(
@@ -228,7 +228,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_tu(
@@ -237,7 +237,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_tu(
@@ -246,7 +246,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_tu(
@@ -255,7 +255,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_tu(
@@ -264,7 +264,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_tu(
@@ -273,7 +273,7 @@ vuint32m4_t test_vnsrl_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_tum(
@@ -282,7 +282,7 @@ vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_tum(
@@ -291,7 +291,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_tum(
@@ -300,7 +300,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_tum(
@@ -309,7 +309,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_tum(
@@ -318,7 +318,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_tum(
@@ -327,7 +327,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_tum(
@@ -336,7 +336,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_tum(
@@ -345,7 +345,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_tum(
@@ -354,7 +354,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_tum(
@@ -363,7 +363,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_tum(
@@ -372,7 +372,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_tum(
@@ -381,7 +381,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_tum(
@@ -390,7 +390,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_tum(
@@ -399,7 +399,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_tum(
@@ -408,7 +408,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_tum(
@@ -417,7 +417,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_tum(
@@ -426,7 +426,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_tum(
@@ -435,7 +435,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_tum(
@@ -444,7 +444,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_tum(
@@ -453,7 +453,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_tum(
@@ -462,7 +462,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_tum(
@@ -471,7 +471,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tum(
@@ -480,7 +480,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tum(
@@ -489,7 +489,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_tum(
@@ -498,7 +498,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_tum(
@@ -507,7 +507,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_tum(
@@ -516,7 +516,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_tum(
@@ -525,7 +525,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_tum(
@@ -534,7 +534,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_tum(
@@ -543,7 +543,7 @@ vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_tumu(
@@ -552,7 +552,7 @@ vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_tumu(
@@ -561,7 +561,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_tumu(
@@ -570,7 +570,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_tumu(
@@ -579,7 +579,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_tumu(
@@ -588,7 +588,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_tumu(
@@ -597,7 +597,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_tumu(
@@ -606,7 +606,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_tumu(
@@ -615,7 +615,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_tumu(
@@ -624,7 +624,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_tumu(
@@ -633,7 +633,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_tumu(
@@ -642,7 +642,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_tumu(
@@ -651,7 +651,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_tumu(
@@ -660,7 +660,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_tumu(
@@ -669,7 +669,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_tumu(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_tumu(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_tumu(
@@ -696,7 +696,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_tumu(
@@ -705,7 +705,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_tumu(
@@ -714,7 +714,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_tumu(
@@ -723,7 +723,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_tumu(
@@ -732,7 +732,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_tumu(
@@ -741,7 +741,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tumu(
@@ -750,7 +750,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tumu(
@@ -759,7 +759,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_tumu(
@@ -768,7 +768,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_tumu(
@@ -777,7 +777,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_tumu(
@@ -786,7 +786,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_tumu(
@@ -795,7 +795,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_tumu(
@@ -804,7 +804,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_tumu(
@@ -813,7 +813,7 @@ vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_mu(
@@ -822,7 +822,7 @@ vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_mu(
@@ -831,7 +831,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_mu(
@@ -840,7 +840,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_mu(
@@ -849,7 +849,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnsrl_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_mu(
@@ -858,7 +858,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_mu(
@@ -867,7 +867,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_mu(
@@ -876,7 +876,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_mu(
@@ -885,7 +885,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_mu(
@@ -894,7 +894,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_mu(
@@ -903,7 +903,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_mu(
@@ -912,7 +912,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_mu(
@@ -921,7 +921,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_mu(
@@ -930,7 +930,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_mu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_mu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_mu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_mu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_mu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_mu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_mu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnsrl_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_mu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_mu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_mu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_mu(
@@ -1029,7 +1029,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_mu(
@@ -1038,7 +1038,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_mu(
@@ -1047,7 +1047,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_mu(
@@ -1056,7 +1056,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_mu(
@@ -1065,7 +1065,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_mu(
@@ -1074,7 +1074,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_mu(
@@ -1083,6 +1083,6 @@ vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnsrl_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vnsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vor.c
index e122a925338a..462c78424ce1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_tu(
@@ -408,7 +408,7 @@ vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_tu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_tu(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_tu(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_tu(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_tu(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m1_tu(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m1_tu(
@@ -471,7 +471,7 @@ vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m2_tu(
@@ -480,7 +480,7 @@ vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m2_tu(
@@ -489,7 +489,7 @@ vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m4_tu(
@@ -498,7 +498,7 @@ vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m4_tu(
@@ -507,7 +507,7 @@ vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m8_tu(
@@ -516,7 +516,7 @@ vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m8_tu(
@@ -525,7 +525,7 @@ vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_tu(
@@ -534,7 +534,7 @@ vuint8m8_t test_vor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_tu(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_tu(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_tu(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m1_tu(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m1_tu(
@@ -579,7 +579,7 @@ vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m2_tu(
@@ -588,7 +588,7 @@ vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m2_tu(
@@ -597,7 +597,7 @@ vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m4_tu(
@@ -606,7 +606,7 @@ vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m4_tu(
@@ -615,7 +615,7 @@ vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m8_tu(
@@ -624,7 +624,7 @@ vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m8_tu(
@@ -633,7 +633,7 @@ vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tu(
@@ -642,7 +642,7 @@ vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tu(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m1_tu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m1_tu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m2_tu(
@@ -678,7 +678,7 @@ vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m2_tu(
@@ -687,7 +687,7 @@ vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m4_tu(
@@ -696,7 +696,7 @@ vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m4_tu(
@@ -705,7 +705,7 @@ vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m8_tu(
@@ -714,7 +714,7 @@ vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m8_tu(
@@ -723,7 +723,7 @@ vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m1_tu(
@@ -732,7 +732,7 @@ vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m1_tu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m2_tu(
@@ -750,7 +750,7 @@ vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m2_tu(
@@ -759,7 +759,7 @@ vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m4_tu(
@@ -768,7 +768,7 @@ vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m4_tu(
@@ -777,7 +777,7 @@ vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m8_tu(
@@ -786,7 +786,7 @@ vuint64m4_t test_vor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m8_tu(
@@ -795,7 +795,7 @@ vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_tum(
@@ -804,7 +804,7 @@ vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_tum(
@@ -813,7 +813,7 @@ vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_tum(
@@ -822,7 +822,7 @@ vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_tum(
@@ -831,7 +831,7 @@ vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_tum(
@@ -840,7 +840,7 @@ vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_tum(
@@ -849,7 +849,7 @@ vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m1_tum(
@@ -858,7 +858,7 @@ vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m1_tum(
@@ -867,7 +867,7 @@ vint8m1_t test_vor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m2_tum(
@@ -876,7 +876,7 @@ vint8m1_t test_vor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m2_tum(
@@ -885,7 +885,7 @@ vint8m2_t test_vor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m4_tum(
@@ -894,7 +894,7 @@ vint8m2_t test_vor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m4_tum(
@@ -903,7 +903,7 @@ vint8m4_t test_vor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m8_tum(
@@ -912,7 +912,7 @@ vint8m4_t test_vor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m8_tum(
@@ -921,7 +921,7 @@ vint8m8_t test_vor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_tum(
@@ -930,7 +930,7 @@ vint8m8_t test_vor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_tum(
@@ -939,7 +939,7 @@ vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_tum(
@@ -948,7 +948,7 @@ vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_tum(
@@ -957,7 +957,7 @@ vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m1_tum(
@@ -966,7 +966,7 @@ vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m1_tum(
@@ -975,7 +975,7 @@ vint16m1_t test_vor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m2_tum(
@@ -984,7 +984,7 @@ vint16m1_t test_vor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m2_tum(
@@ -993,7 +993,7 @@ vint16m2_t test_vor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m4_tum(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m4_tum(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m8_tum(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m8_tum(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tum(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tum(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m1_tum(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m1_tum(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m2_tum(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m2_tum(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m4_tum(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m4_tum(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m8_tum(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m8_tum(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m1_tum(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m1_tum(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m2_tum(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m2_tum(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m4_tum(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m4_tum(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m8_tum(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m8_tum(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_tum(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_tum(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_tum(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_tum(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_tum(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_tum(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m1_tum(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m1_tum(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m2_tum(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m2_tum(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m4_tum(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m4_tum(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m8_tum(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m8_tum(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_tum(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_tum(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_tum(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_tum(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m1_tum(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m1_tum(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m2_tum(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m2_tum(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m4_tum(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m4_tum(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m8_tum(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m8_tum(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tum(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tum(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m1_tum(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m1_tum(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m2_tum(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m2_tum(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m4_tum(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m4_tum(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m8_tum(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m8_tum(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m1_tum(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m1_tum(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m2_tum(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m2_tum(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m4_tum(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m4_tum(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m8_tum(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m8_tum(
@@ -1587,7 +1587,7 @@ vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_tumu(
@@ -1596,7 +1596,7 @@ vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_tumu(
@@ -1605,7 +1605,7 @@ vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_tumu(
@@ -1614,7 +1614,7 @@ vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_tumu(
@@ -1623,7 +1623,7 @@ vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_tumu(
@@ -1632,7 +1632,7 @@ vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_tumu(
@@ -1641,7 +1641,7 @@ vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m1_tumu(
@@ -1650,7 +1650,7 @@ vint8mf2_t test_vor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m1_tumu(
@@ -1659,7 +1659,7 @@ vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m2_tumu(
@@ -1668,7 +1668,7 @@ vint8m1_t test_vor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m2_tumu(
@@ -1677,7 +1677,7 @@ vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m4_tumu(
@@ -1686,7 +1686,7 @@ vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m4_tumu(
@@ -1695,7 +1695,7 @@ vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m8_tumu(
@@ -1704,7 +1704,7 @@ vint8m4_t test_vor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m8_tumu(
@@ -1713,7 +1713,7 @@ vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_tumu(
@@ -1722,7 +1722,7 @@ vint8m8_t test_vor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_tumu(
@@ -1731,7 +1731,7 @@ vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_tumu(
@@ -1740,7 +1740,7 @@ vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_tumu(
@@ -1749,7 +1749,7 @@ vint16mf2_t test_vor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m1_tumu(
@@ -1758,7 +1758,7 @@ vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m1_tumu(
@@ -1767,7 +1767,7 @@ vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m2_tumu(
@@ -1776,7 +1776,7 @@ vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m2_tumu(
@@ -1785,7 +1785,7 @@ vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m4_tumu(
@@ -1794,7 +1794,7 @@ vint16m2_t test_vor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m4_tumu(
@@ -1803,7 +1803,7 @@ vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m8_tumu(
@@ -1812,7 +1812,7 @@ vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m8_tumu(
@@ -1821,7 +1821,7 @@ vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tumu(
@@ -1830,7 +1830,7 @@ vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tumu(
@@ -1839,7 +1839,7 @@ vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m1_tumu(
@@ -1848,7 +1848,7 @@ vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m1_tumu(
@@ -1857,7 +1857,7 @@ vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m2_tumu(
@@ -1866,7 +1866,7 @@ vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m2_tumu(
@@ -1875,7 +1875,7 @@ vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m4_tumu(
@@ -1884,7 +1884,7 @@ vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m4_tumu(
@@ -1893,7 +1893,7 @@ vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m8_tumu(
@@ -1902,7 +1902,7 @@ vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m8_tumu(
@@ -1911,7 +1911,7 @@ vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m1_tumu(
@@ -1920,7 +1920,7 @@ vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m1_tumu(
@@ -1929,7 +1929,7 @@ vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m2_tumu(
@@ -1938,7 +1938,7 @@ vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m2_tumu(
@@ -1947,7 +1947,7 @@ vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m4_tumu(
@@ -1956,7 +1956,7 @@ vint64m2_t test_vor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m4_tumu(
@@ -1965,7 +1965,7 @@ vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m8_tumu(
@@ -1974,7 +1974,7 @@ vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m8_tumu(
@@ -1983,7 +1983,7 @@ vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_tumu(
@@ -1992,7 +1992,7 @@ vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_tumu(
@@ -2001,7 +2001,7 @@ vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_tumu(
@@ -2010,7 +2010,7 @@ vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_tumu(
@@ -2019,7 +2019,7 @@ vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_tumu(
@@ -2028,7 +2028,7 @@ vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_tumu(
@@ -2037,7 +2037,7 @@ vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m1_tumu(
@@ -2046,7 +2046,7 @@ vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m1_tumu(
@@ -2055,7 +2055,7 @@ vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m2_tumu(
@@ -2064,7 +2064,7 @@ vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m2_tumu(
@@ -2073,7 +2073,7 @@ vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m4_tumu(
@@ -2082,7 +2082,7 @@ vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m4_tumu(
@@ -2091,7 +2091,7 @@ vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m8_tumu(
@@ -2100,7 +2100,7 @@ vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m8_tumu(
@@ -2109,7 +2109,7 @@ vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_tumu(
@@ -2118,7 +2118,7 @@ vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_tumu(
@@ -2127,7 +2127,7 @@ vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_tumu(
@@ -2136,7 +2136,7 @@ vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_tumu(
@@ -2145,7 +2145,7 @@ vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m1_tumu(
@@ -2154,7 +2154,7 @@ vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m1_tumu(
@@ -2163,7 +2163,7 @@ vuint16m1_t test_vor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m2_tumu(
@@ -2172,7 +2172,7 @@ vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m2_tumu(
@@ -2181,7 +2181,7 @@ vuint16m2_t test_vor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m4_tumu(
@@ -2190,7 +2190,7 @@ vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m4_tumu(
@@ -2199,7 +2199,7 @@ vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m8_tumu(
@@ -2208,7 +2208,7 @@ vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m8_tumu(
@@ -2217,7 +2217,7 @@ vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tumu(
@@ -2226,7 +2226,7 @@ vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tumu(
@@ -2235,7 +2235,7 @@ vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m1_tumu(
@@ -2244,7 +2244,7 @@ vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m1_tumu(
@@ -2253,7 +2253,7 @@ vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m2_tumu(
@@ -2262,7 +2262,7 @@ vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m2_tumu(
@@ -2271,7 +2271,7 @@ vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m4_tumu(
@@ -2280,7 +2280,7 @@ vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m4_tumu(
@@ -2289,7 +2289,7 @@ vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m8_tumu(
@@ -2298,7 +2298,7 @@ vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m8_tumu(
@@ -2307,7 +2307,7 @@ vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m1_tumu(
@@ -2316,7 +2316,7 @@ vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m1_tumu(
@@ -2325,7 +2325,7 @@ vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m2_tumu(
@@ -2334,7 +2334,7 @@ vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m2_tumu(
@@ -2343,7 +2343,7 @@ vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m4_tumu(
@@ -2352,7 +2352,7 @@ vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m4_tumu(
@@ -2361,7 +2361,7 @@ vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m8_tumu(
@@ -2370,7 +2370,7 @@ vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m8_tumu(
@@ -2379,7 +2379,7 @@ vuint64m8_t test_vor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_mu(
@@ -2388,7 +2388,7 @@ vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_mu(
@@ -2397,7 +2397,7 @@ vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_mu(
@@ -2406,7 +2406,7 @@ vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_mu(
@@ -2415,7 +2415,7 @@ vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_mu(
@@ -2424,7 +2424,7 @@ vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_mu(
@@ -2433,7 +2433,7 @@ vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m1_mu(
@@ -2442,7 +2442,7 @@ vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m1_mu(
@@ -2451,7 +2451,7 @@ vint8m1_t test_vor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m2_mu(
@@ -2460,7 +2460,7 @@ vint8m1_t test_vor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m2_mu(
@@ -2469,7 +2469,7 @@ vint8m2_t test_vor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m4_mu(
@@ -2478,7 +2478,7 @@ vint8m2_t test_vor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m4_mu(
@@ -2487,7 +2487,7 @@ vint8m4_t test_vor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i8m8_mu(
@@ -2496,7 +2496,7 @@ vint8m4_t test_vor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i8m8_mu(
@@ -2505,7 +2505,7 @@ vint8m8_t test_vor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_mu(
@@ -2514,7 +2514,7 @@ vint8m8_t test_vor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_mu(
@@ -2523,7 +2523,7 @@ vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_mu(
@@ -2532,7 +2532,7 @@ vint16mf4_t test_vor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_mu(
@@ -2541,7 +2541,7 @@ vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m1_mu(
@@ -2550,7 +2550,7 @@ vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m1_mu(
@@ -2559,7 +2559,7 @@ vint16m1_t test_vor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m2_mu(
@@ -2568,7 +2568,7 @@ vint16m1_t test_vor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m2_mu(
@@ -2577,7 +2577,7 @@ vint16m2_t test_vor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m4_mu(
@@ -2586,7 +2586,7 @@ vint16m2_t test_vor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m4_mu(
@@ -2595,7 +2595,7 @@ vint16m4_t test_vor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i16m8_mu(
@@ -2604,7 +2604,7 @@ vint16m4_t test_vor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i16m8_mu(
@@ -2613,7 +2613,7 @@ vint16m8_t test_vor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_mu(
@@ -2622,7 +2622,7 @@ vint16m8_t test_vor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_mu(
@@ -2631,7 +2631,7 @@ vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m1_mu(
@@ -2640,7 +2640,7 @@ vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m1_mu(
@@ -2649,7 +2649,7 @@ vint32m1_t test_vor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m2_mu(
@@ -2658,7 +2658,7 @@ vint32m1_t test_vor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m2_mu(
@@ -2667,7 +2667,7 @@ vint32m2_t test_vor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m4_mu(
@@ -2676,7 +2676,7 @@ vint32m2_t test_vor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m4_mu(
@@ -2685,7 +2685,7 @@ vint32m4_t test_vor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i32m8_mu(
@@ -2694,7 +2694,7 @@ vint32m4_t test_vor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i32m8_mu(
@@ -2703,7 +2703,7 @@ vint32m8_t test_vor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m1_mu(
@@ -2712,7 +2712,7 @@ vint32m8_t test_vor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m1_mu(
@@ -2721,7 +2721,7 @@ vint64m1_t test_vor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m2_mu(
@@ -2730,7 +2730,7 @@ vint64m1_t test_vor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m2_mu(
@@ -2739,7 +2739,7 @@ vint64m2_t test_vor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m4_mu(
@@ -2748,7 +2748,7 @@ vint64m2_t test_vor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m4_mu(
@@ -2757,7 +2757,7 @@ vint64m4_t test_vor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_i64m8_mu(
@@ -2766,7 +2766,7 @@ vint64m4_t test_vor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_i64m8_mu(
@@ -2775,7 +2775,7 @@ vint64m8_t test_vor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_mu(
@@ -2784,7 +2784,7 @@ vint64m8_t test_vor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_mu(
@@ -2793,7 +2793,7 @@ vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_mu(
@@ -2802,7 +2802,7 @@ vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_mu(
@@ -2811,7 +2811,7 @@ vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_mu(
@@ -2820,7 +2820,7 @@ vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_mu(
@@ -2829,7 +2829,7 @@ vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m1_mu(
@@ -2838,7 +2838,7 @@ vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m1_mu(
@@ -2847,7 +2847,7 @@ vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m2_mu(
@@ -2856,7 +2856,7 @@ vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m2_mu(
@@ -2865,7 +2865,7 @@ vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m4_mu(
@@ -2874,7 +2874,7 @@ vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m4_mu(
@@ -2883,7 +2883,7 @@ vuint8m4_t test_vor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u8m8_mu(
@@ -2892,7 +2892,7 @@ vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u8m8_mu(
@@ -2901,7 +2901,7 @@ vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_mu(
@@ -2910,7 +2910,7 @@ vuint8m8_t test_vor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_mu(
@@ -2919,7 +2919,7 @@ vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_mu(
@@ -2928,7 +2928,7 @@ vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_mu(
@@ -2937,7 +2937,7 @@ vuint16mf2_t test_vor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m1_mu(
@@ -2946,7 +2946,7 @@ vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m1_mu(
@@ -2955,7 +2955,7 @@ vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m2_mu(
@@ -2964,7 +2964,7 @@ vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m2_mu(
@@ -2973,7 +2973,7 @@ vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m4_mu(
@@ -2982,7 +2982,7 @@ vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m4_mu(
@@ -2991,7 +2991,7 @@ vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u16m8_mu(
@@ -3000,7 +3000,7 @@ vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u16m8_mu(
@@ -3009,7 +3009,7 @@ vuint16m8_t test_vor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_mu(
@@ -3018,7 +3018,7 @@ vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_mu(
@@ -3027,7 +3027,7 @@ vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m1_mu(
@@ -3036,7 +3036,7 @@ vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m1_mu(
@@ -3045,7 +3045,7 @@ vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m2_mu(
@@ -3054,7 +3054,7 @@ vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m2_mu(
@@ -3063,7 +3063,7 @@ vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m4_mu(
@@ -3072,7 +3072,7 @@ vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m4_mu(
@@ -3081,7 +3081,7 @@ vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u32m8_mu(
@@ -3090,7 +3090,7 @@ vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u32m8_mu(
@@ -3099,7 +3099,7 @@ vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m1_mu(
@@ -3108,7 +3108,7 @@ vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m1_mu(
@@ -3117,7 +3117,7 @@ vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m2_mu(
@@ -3126,7 +3126,7 @@ vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m2_mu(
@@ -3135,7 +3135,7 @@ vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m4_mu(
@@ -3144,7 +3144,7 @@ vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m4_mu(
@@ -3153,7 +3153,7 @@ vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m8_mu(
@@ -3162,7 +3162,7 @@ vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m8_mu(
@@ -3171,6 +3171,6 @@ vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}
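// Editor's sketch (not part of the patch): after the rename, a caller of the
// overloaded mask-undisturbed (`_mu`) form spells the vendor prefix
// explicitly. Assumes <riscv_vector.h> and a toolchain with the V extension
// enabled (e.g. -march=rv64gcv); the wrapper name below is illustrative.
#include <riscv_vector.h>

// Bitwise OR under a mask: inactive lanes keep the value from `maskedoff`.
vuint32m1_t or_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff,
                        vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return __riscv_vor_mu(mask, maskedoff, op1, op2, vl);
}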
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredand.c
index 27d13dd5b4f5..b59faeec2085 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredand.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tu(
@@ -21,7 +21,7 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tu(
@@ -30,7 +30,7 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tu(
@@ -39,7 +39,7 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tu(
@@ -57,7 +57,7 @@ vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tu(
@@ -66,7 +66,7 @@ vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tu(
@@ -84,7 +84,7 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tu(
@@ -93,7 +93,7 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tu(
@@ -111,7 +111,7 @@ vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tu(
@@ -120,7 +120,7 @@ vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tu(
@@ -129,7 +129,7 @@ vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tu(
@@ -138,7 +138,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tu(
@@ -156,7 +156,7 @@ vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tu(
@@ -165,7 +165,7 @@ vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tu(
@@ -192,7 +192,7 @@ vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tu(
@@ -201,7 +201,7 @@ vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tu(
@@ -210,7 +210,7 @@ vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tu(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tu(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tu(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tu(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tu(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tu(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tu(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tu(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tu(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tu(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tu(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tu(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tu(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tu(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tu(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tu(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tu(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tu(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tu(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tu(
@@ -399,7 +399,7 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_tum(
@@ -408,7 +408,7 @@ vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tum(
@@ -417,7 +417,7 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tum(
@@ -426,7 +426,7 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tum(
@@ -435,7 +435,7 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tum(
@@ -444,7 +444,7 @@ vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tum(
@@ -453,7 +453,7 @@ vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tum(
@@ -462,7 +462,7 @@ vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tum(
@@ -480,7 +480,7 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tum(
@@ -489,7 +489,7 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tum(
@@ -498,7 +498,7 @@ vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tum(
@@ -507,7 +507,7 @@ vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tum(
@@ -516,7 +516,7 @@ vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum(
@@ -525,7 +525,7 @@ vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tum(
@@ -534,7 +534,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tum(
@@ -543,7 +543,7 @@ vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tum(
@@ -552,7 +552,7 @@ vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tum(
@@ -561,7 +561,7 @@ vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tum(
@@ -570,7 +570,7 @@ vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tum(
@@ -579,7 +579,7 @@ vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tum(
@@ -588,7 +588,7 @@ vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tum(
@@ -597,7 +597,7 @@ vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tum(
@@ -606,7 +606,7 @@ vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tum(
@@ -615,7 +615,7 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tum(
@@ -624,7 +624,7 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tum(
@@ -633,7 +633,7 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tum(
@@ -642,7 +642,7 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tum(
@@ -651,7 +651,7 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tum(
@@ -660,7 +660,7 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tum(
@@ -669,7 +669,7 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tum(
@@ -678,7 +678,7 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tum(
@@ -687,7 +687,7 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tum(
@@ -696,7 +696,7 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tum(
@@ -705,7 +705,7 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tum(
@@ -714,7 +714,7 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum(
@@ -723,7 +723,7 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tum(
@@ -732,7 +732,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tum(
@@ -741,7 +741,7 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tum(
@@ -750,7 +750,7 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tum(
@@ -759,7 +759,7 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tum(
@@ -768,7 +768,7 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tum(
@@ -777,7 +777,7 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tum(
@@ -786,7 +786,7 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tum(
@@ -795,6 +795,6 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
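// Editor's sketch (not part of the patch): the reduction overloads are renamed
// the same way. `_tu` keeps the tail of the m1 destination from `maskedoff`;
// `_tum` additionally takes a mask, so only active elements of `vector` enter
// the AND reduction. Assumes <riscv_vector.h>; wrapper names are illustrative.
#include <riscv_vector.h>

// Unmasked, tail-undisturbed AND reduction into element 0 of an m1 register.
vint32m1_t redand_tu_example(vint32m1_t maskedoff, vint32mf2_t vector,
                             vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// Masked variant: lanes with a clear mask bit do not participate.
vint32m1_t redand_tum_example(vbool64_t mask, vint32m1_t maskedoff,
                              vint32mf2_t vector, vint32m1_t scalar,
                              size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}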
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmax.c
index 0d0fda30e003..09d227d620de 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmax.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tu(
@@ -21,7 +21,7 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tu(
@@ -30,7 +30,7 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tu(
@@ -39,7 +39,7 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_tu(
@@ -57,7 +57,7 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tu(
@@ -66,7 +66,7 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tu(
@@ -84,7 +84,7 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tu(
@@ -93,7 +93,7 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tu(
@@ -111,7 +111,7 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tu(
@@ -120,7 +120,7 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tu(
@@ -129,7 +129,7 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_tu(
@@ -138,7 +138,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tu(
@@ -156,7 +156,7 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tu(
@@ -165,7 +165,7 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tu(
@@ -192,7 +192,7 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tu(
@@ -201,7 +201,7 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_tum(
@@ -210,7 +210,7 @@ vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tum(
@@ -219,7 +219,7 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tum(
@@ -228,7 +228,7 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tum(
@@ -237,7 +237,7 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tum(
@@ -246,7 +246,7 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_tum(
@@ -255,7 +255,7 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tum(
@@ -264,7 +264,7 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tum(
@@ -273,7 +273,7 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tum(
@@ -282,7 +282,7 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tum(
@@ -291,7 +291,7 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tum(
@@ -300,7 +300,7 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tum(
@@ -309,7 +309,7 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tum(
@@ -318,7 +318,7 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum(
@@ -327,7 +327,7 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_tum(
@@ -336,7 +336,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tum(
@@ -345,7 +345,7 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tum(
@@ -354,7 +354,7 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tum(
@@ -363,7 +363,7 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tum(
@@ -372,7 +372,7 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tum(
@@ -381,7 +381,7 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tum(
@@ -390,7 +390,7 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tum(
@@ -399,6 +399,6 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredmax_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmaxu.c
index 67613624e3ce..3a487800c413 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmaxu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmaxu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tu(
@@ -21,7 +21,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vect
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tu(
@@ -30,7 +30,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vect
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tu(
@@ -39,7 +39,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vect
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tu(
@@ -48,7 +48,7 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tu(
@@ -57,7 +57,7 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tu(
@@ -84,7 +84,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tu(
@@ -93,7 +93,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tu(
@@ -102,7 +102,7 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tu(
@@ -111,7 +111,7 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tu(
@@ -120,7 +120,7 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu(
@@ -129,7 +129,7 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tu(
@@ -138,7 +138,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tu(
@@ -147,7 +147,7 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tu(
@@ -156,7 +156,7 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tu(
@@ -165,7 +165,7 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tu(
@@ -174,7 +174,7 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_tu(
@@ -183,7 +183,7 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tu(
@@ -192,7 +192,7 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tu(
@@ -201,7 +201,7 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_tum(
@@ -210,7 +210,7 @@ vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tum(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tum(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tum(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tum(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tum(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tum(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tum(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tum(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tum(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tum(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tum(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tum(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tum(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tum(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tum(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tum(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tum(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_tum(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tum(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tum(
@@ -399,6 +399,6 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmin.c
index 568a06647f87..9b869f59feae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredmin.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tu(
@@ -21,7 +21,7 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tu(
@@ -30,7 +30,7 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tu(
@@ -39,7 +39,7 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_tu(
@@ -57,7 +57,7 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tu(
@@ -66,7 +66,7 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tu(
@@ -84,7 +84,7 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tu(
@@ -93,7 +93,7 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tu(
@@ -111,7 +111,7 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tu(
@@ -120,7 +120,7 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tu(
@@ -129,7 +129,7 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_tu(
@@ -138,7 +138,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tu(
@@ -156,7 +156,7 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tu(
@@ -165,7 +165,7 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_tu(
@@ -192,7 +192,7 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tu(
@@ -201,7 +201,7 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_tum(
@@ -210,7 +210,7 @@ vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tum(
@@ -219,7 +219,7 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tum(
@@ -228,7 +228,7 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tum(
@@ -237,7 +237,7 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tum(
@@ -246,7 +246,7 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_tum(
@@ -255,7 +255,7 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tum(
@@ -264,7 +264,7 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tum(
@@ -273,7 +273,7 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tum(
@@ -282,7 +282,7 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tum(
@@ -291,7 +291,7 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tum(
@@ -300,7 +300,7 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tum(
@@ -309,7 +309,7 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tum(
@@ -318,7 +318,7 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum(
@@ -327,7 +327,7 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_tum(
@@ -336,7 +336,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tum(
@@ -345,7 +345,7 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tum(
@@ -354,7 +354,7 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tum(
@@ -363,7 +363,7 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmin_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tum(
@@ -372,7 +372,7 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tum(
@@ -381,7 +381,7 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_tum(
@@ -390,7 +390,7 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tum(
@@ -399,6 +399,6 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmin_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredmin_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredminu.c
index 991fa7c43bb0..1215ddd56e43 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredminu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredminu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tu(
@@ -21,7 +21,7 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vect
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tu(
@@ -30,7 +30,7 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vect
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tu(
@@ -39,7 +39,7 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vect
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tu(
@@ -48,7 +48,7 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tu(
@@ -57,7 +57,7 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tu(
@@ -84,7 +84,7 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tu(
@@ -93,7 +93,7 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tu(
@@ -102,7 +102,7 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tu(
@@ -111,7 +111,7 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tu(
@@ -120,7 +120,7 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu(
@@ -129,7 +129,7 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tu(
@@ -138,7 +138,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tu(
@@ -147,7 +147,7 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tu(
@@ -156,7 +156,7 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tu(
@@ -165,7 +165,7 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tu(
@@ -174,7 +174,7 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tu(
@@ -183,7 +183,7 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tu(
@@ -192,7 +192,7 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tu(
@@ -201,7 +201,7 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_tum(
@@ -210,7 +210,7 @@ vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tum(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tum(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tum(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tum(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tum(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tum(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tum(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tum(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tum(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tum(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tum(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tum(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tum(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tum(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tum(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tum(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tum(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tum(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tum(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tum(
@@ -399,6 +399,6 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredminu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl);
}
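The remaining hunks apply the same mechanical rename to vredor.c and vredsum.c. For orientation, a minimal user-side sketch of the before/after spelling of an overloaded policy intrinsic (the wrapper name redminu_tu_example is hypothetical and not part of the test suite; assumes <riscv_vector.h> and a toolchain with the V extension):

#include <riscv_vector.h>

// Tail-undisturbed unsigned-min reduction. Before this patch the overloaded
// form was spelled vredminu_tu(...); with the __riscv_ prefix added, the
// same call is now:
vuint32m1_t redminu_tu_example(vuint32m1_t maskedoff, vuint32m4_t vector,
                               vuint32m1_t scalar, size_t vl) {
  return __riscv_vredminu_tu(maskedoff, vector, scalar, vl);
}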
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredor.c
index e3e671541ad9..07d15b2524e3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tu(
@@ -21,7 +21,7 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tu(
@@ -30,7 +30,7 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tu(
@@ -39,7 +39,7 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tu(
@@ -57,7 +57,7 @@ vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_tu(
@@ -66,7 +66,7 @@ vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tu(
@@ -84,7 +84,7 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tu(
@@ -93,7 +93,7 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tu(
@@ -111,7 +111,7 @@ vint16m1_t test_vredor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tu(
@@ -120,7 +120,7 @@ vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tu(
@@ -129,7 +129,7 @@ vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tu(
@@ -138,7 +138,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vect
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tu(
@@ -156,7 +156,7 @@ vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tu(
@@ -165,7 +165,7 @@ vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tu(
@@ -192,7 +192,7 @@ vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_tu(
@@ -201,7 +201,7 @@ vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tu(
@@ -210,7 +210,7 @@ vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tu(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tu(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tu(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tu(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tu(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tu(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tu(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tu(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tu(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tu(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tu(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tu(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tu(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tu(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tu(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_tu(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tu(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vec
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tu(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vec
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tu(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vec
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tu(
@@ -399,7 +399,7 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vec
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_tum(
@@ -408,7 +408,7 @@ vuint64m1_t test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vec
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tum(
@@ -417,7 +417,7 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tum(
@@ -426,7 +426,7 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tum(
@@ -435,7 +435,7 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tum(
@@ -444,7 +444,7 @@ vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tum(
@@ -453,7 +453,7 @@ vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_tum(
@@ -462,7 +462,7 @@ vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tum(
@@ -480,7 +480,7 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tum(
@@ -489,7 +489,7 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tum(
@@ -498,7 +498,7 @@ vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tum(
@@ -507,7 +507,7 @@ vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tum(
@@ -516,7 +516,7 @@ vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum(
@@ -525,7 +525,7 @@ vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tum(
@@ -534,7 +534,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tum(
@@ -543,7 +543,7 @@ vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tum(
@@ -552,7 +552,7 @@ vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tum(
@@ -561,7 +561,7 @@ vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tum(
@@ -570,7 +570,7 @@ vint32m1_t test_vredor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_tum(
@@ -579,7 +579,7 @@ vint64m1_t test_vredor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tum(
@@ -588,7 +588,7 @@ vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_tum(
@@ -597,7 +597,7 @@ vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tum(
@@ -606,7 +606,7 @@ vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tum(
@@ -615,7 +615,7 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tum(
@@ -624,7 +624,7 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tum(
@@ -633,7 +633,7 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tum(
@@ -642,7 +642,7 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tum(
@@ -651,7 +651,7 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tum(
@@ -660,7 +660,7 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tum(
@@ -669,7 +669,7 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tum(
@@ -678,7 +678,7 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tum(
@@ -687,7 +687,7 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tum(
@@ -696,7 +696,7 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tum(
@@ -705,7 +705,7 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tum(
@@ -714,7 +714,7 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum(
@@ -723,7 +723,7 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tum(
@@ -732,7 +732,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tum(
@@ -741,7 +741,7 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tum(
@@ -750,7 +750,7 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_tum(
@@ -759,7 +759,7 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tum(
@@ -768,7 +768,7 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tum(
@@ -777,7 +777,7 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tum(
@@ -786,7 +786,7 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tum(
@@ -795,6 +795,6 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredor_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}
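The vredor.c hunks above and the vredsum.c hunks below follow the identical pattern, including the masked tum overloads, where the mask argument precedes maskedoff. A minimal masked sketch (wrapper name redor_tum_example is hypothetical; same assumptions as the sketch above):

#include <riscv_vector.h>

// Masked, tail-undisturbed OR reduction with the new __riscv_ spelling;
// inactive elements and the tail keep the values from maskedoff.
vint8m1_t redor_tum_example(vbool64_t mask, vint8m1_t maskedoff,
                            vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl);
}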
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredsum.c
index 2e33965cb159..5f0962adec33 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredsum.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tu(
@@ -21,7 +21,7 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_tu(
@@ -30,7 +30,7 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tu(
@@ -39,7 +39,7 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tu(
@@ -57,7 +57,7 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tu(
@@ -66,7 +66,7 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tu(
@@ -84,7 +84,7 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tu(
@@ -93,7 +93,7 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tu(
@@ -111,7 +111,7 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tu(
@@ -120,7 +120,7 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tu(
@@ -129,7 +129,7 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tu(
@@ -138,7 +138,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tu(
@@ -156,7 +156,7 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tu(
@@ -165,7 +165,7 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tu(
@@ -192,7 +192,7 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tu(
@@ -201,7 +201,7 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tu(
@@ -210,7 +210,7 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tu(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tu(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tu(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tu(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tu(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_tu(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tu(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tu(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tu(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tu(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tu(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tu(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tu(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tu(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tu(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tu(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tu(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tu(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_tu(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tu(
@@ -399,7 +399,7 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_tum(
@@ -408,7 +408,7 @@ vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tum(
@@ -417,7 +417,7 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_tum(
@@ -426,7 +426,7 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tum(
@@ -435,7 +435,7 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tum(
@@ -444,7 +444,7 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tum(
@@ -453,7 +453,7 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tum(
@@ -462,7 +462,7 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tum(
@@ -480,7 +480,7 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tum(
@@ -489,7 +489,7 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tum(
@@ -498,7 +498,7 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tum(
@@ -507,7 +507,7 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tum(
@@ -516,7 +516,7 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum(
@@ -525,7 +525,7 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tum(
@@ -534,7 +534,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tum(
@@ -543,7 +543,7 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tum(
@@ -552,7 +552,7 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tum(
@@ -561,7 +561,7 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tum(
@@ -570,7 +570,7 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tum(
@@ -579,7 +579,7 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tum(
@@ -588,7 +588,7 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tum(
@@ -597,7 +597,7 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tum(
@@ -606,7 +606,7 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tum(
@@ -615,7 +615,7 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tum(
@@ -624,7 +624,7 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tum(
@@ -633,7 +633,7 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tum(
@@ -642,7 +642,7 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tum(
@@ -651,7 +651,7 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_tum(
@@ -660,7 +660,7 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tum(
@@ -669,7 +669,7 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tum(
@@ -678,7 +678,7 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tum(
@@ -687,7 +687,7 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tum(
@@ -696,7 +696,7 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tum(
@@ -705,7 +705,7 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tum(
@@ -714,7 +714,7 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum(
@@ -723,7 +723,7 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tum(
@@ -732,7 +732,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tum(
@@ -741,7 +741,7 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tum(
@@ -750,7 +750,7 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tum(
@@ -759,7 +759,7 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tum(
@@ -768,7 +768,7 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tum(
@@ -777,7 +777,7 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_tum(
@@ -786,7 +786,7 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tum(
@@ -795,6 +795,6 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredxor.c
index 3fed69459ea6..a7fb6b4bd764 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vredxor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tu(
@@ -21,7 +21,7 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tu(
@@ -30,7 +30,7 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tu(
@@ -39,7 +39,7 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tu(
@@ -57,7 +57,7 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_tu(
@@ -66,7 +66,7 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tu(
@@ -84,7 +84,7 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tu(
@@ -93,7 +93,7 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tu(
@@ -111,7 +111,7 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tu(
@@ -120,7 +120,7 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tu(
@@ -129,7 +129,7 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_tu(
@@ -138,7 +138,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tu(
@@ -156,7 +156,7 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tu(
@@ -165,7 +165,7 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tu(
@@ -192,7 +192,7 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tu(
@@ -201,7 +201,7 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tu(
@@ -210,7 +210,7 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tu(
@@ -219,7 +219,7 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tu(
@@ -228,7 +228,7 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tu(
@@ -237,7 +237,7 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vecto
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tu(
@@ -246,7 +246,7 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tu(
@@ -255,7 +255,7 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_tu(
@@ -264,7 +264,7 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tu(
@@ -273,7 +273,7 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tu(
@@ -282,7 +282,7 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_tu(
@@ -291,7 +291,7 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tu(
@@ -300,7 +300,7 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tu(
@@ -309,7 +309,7 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tu(
@@ -318,7 +318,7 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu(
@@ -327,7 +327,7 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tu(
@@ -336,7 +336,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tu(
@@ -345,7 +345,7 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tu(
@@ -354,7 +354,7 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tu(
@@ -363,7 +363,7 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tu(
@@ -372,7 +372,7 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tu(
@@ -381,7 +381,7 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tu(
@@ -390,7 +390,7 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_tu(
@@ -399,7 +399,7 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_tum(
@@ -408,7 +408,7 @@ vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tum(
@@ -417,7 +417,7 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tum(
@@ -426,7 +426,7 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tum(
@@ -435,7 +435,7 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tum(
@@ -444,7 +444,7 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tum(
@@ -453,7 +453,7 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_tum(
@@ -462,7 +462,7 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tum(
@@ -480,7 +480,7 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tum(
@@ -489,7 +489,7 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tum(
@@ -498,7 +498,7 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tum(
@@ -507,7 +507,7 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tum(
@@ -516,7 +516,7 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum(
@@ -525,7 +525,7 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_tum(
@@ -534,7 +534,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tum(
@@ -543,7 +543,7 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tum(
@@ -552,7 +552,7 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tum(
@@ -561,7 +561,7 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tum(
@@ -570,7 +570,7 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tum(
@@ -579,7 +579,7 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tum(
@@ -588,7 +588,7 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tum(
@@ -597,7 +597,7 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tum(
@@ -606,7 +606,7 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tum(
@@ -615,7 +615,7 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tum(
@@ -624,7 +624,7 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tum(
@@ -633,7 +633,7 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tum(
@@ -642,7 +642,7 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tum(
@@ -651,7 +651,7 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_tum(
@@ -660,7 +660,7 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tum(
@@ -669,7 +669,7 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tum(
@@ -678,7 +678,7 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_tum(
@@ -687,7 +687,7 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tum(
@@ -696,7 +696,7 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tum(
@@ -705,7 +705,7 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tum(
@@ -714,7 +714,7 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum(
@@ -723,7 +723,7 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tum(
@@ -732,7 +732,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tum(
@@ -741,7 +741,7 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tum(
@@ -750,7 +750,7 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tum(
@@ -759,7 +759,7 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tum(
@@ -768,7 +768,7 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tum(
@@ -777,7 +777,7 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tum(
@@ -786,7 +786,7 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_tum(
@@ -795,6 +795,6 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}
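(The hunks above and below are purely mechanical renames of the overloaded intrinsic call sites; nothing else in each function changes. For reference, a minimal caller-side sketch of the new spelling, using the same signature as the tests above — the wrapper name is illustrative, and it assumes a Clang recent enough to ship the `__riscv_`-prefixed overloaded RVV intrinsics, e.g. when compiling with -march=rv64gcv:

#include <riscv_vector.h>

// Before this patch the call was spelled vredxor_tum(...); after it, the
// overloaded intrinsic carries the __riscv_ prefix while the argument list
// (mask, maskedoff, vector operand, scalar accumulator, vl) is unchanged.
vint32m1_t xor_reduce_tum(vbool32_t mask, vint32m1_t maskedoff,
                          vint32m1_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
}

Because the intrinsics are overloaded, the element type and LMUL are still inferred from the argument types; migrating existing code is a textual rename only.)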
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrem.c
index d3d5dd82f886..9de5042b2559 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrem.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vrem_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrem_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrem_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vrem_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vrem_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vrem_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrem_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vrem_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vrem_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vrem_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vrem_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vrem_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrem_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vrem_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrem_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vrem_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vrem_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrem_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrem_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
}
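The same mechanical pattern repeats across every element width and LMUL above: each overloaded policy intrinsic keeps its argument list and only gains the __riscv_ prefix. As a minimal sketch of how caller code migrates (hypothetical user code, not part of this patch; assumes a toolchain built from this series):

#include <riscv_vector.h>

// Masked signed remainder with the tail-undisturbed (_tum) policy.
// Semantics are unchanged by this patch; only the overloaded name
// is prefixed.
vint32m1_t mod_tum(vbool32_t mask, vint32m1_t maskedoff,
                   vint32m1_t op1, vint32m1_t op2, size_t vl) {
  // Before this series: return vrem_tum(mask, maskedoff, op1, op2, vl);
  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
}
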
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vremu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vremu.c
index ff607d94e006..a947fc5fbcf4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vremu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vremu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vremu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vremu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vremu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vremu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vremu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vremu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vremu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vremu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vremu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vremu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vremu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vremu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vremu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vremu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vremu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vremu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vremu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vremu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vremu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl);
}
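For reference, a minimal caller sketch using the prefixed overloaded spelling the tests above exercise. This is hypothetical example code, not part of the patch; it assumes a toolchain with RVV intrinsic support (e.g. clang with -march=rv64gcv), and the overload still resolves on operand types, so only the __riscv_ prefix differs from the old spelling.

    #include <riscv_vector.h>

    /* Masked unsigned remainder with the tail-undisturbed,
     * mask-undisturbed (tum) policy, via the prefixed overloaded
     * intrinsic. Previously this call was spelled vremu_tum(...). */
    vuint32m1_t mod_masked(vbool32_t mask, vuint32m1_t maskedoff,
                           vuint32m1_t a, vuint32m1_t b, size_t vl) {
      return __riscv_vremu_tum(mask, maskedoff, a, b, vl);
    }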
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
index dc3a0575497f..ece7851142a6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_tu(
@@ -31,7 +31,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_tu(
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_tu(
@@ -49,7 +49,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_tu(
@@ -58,7 +58,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_tu(
@@ -67,7 +67,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_tu(
@@ -76,7 +76,7 @@ vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_tu(
@@ -85,7 +85,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_tu(
@@ -94,7 +94,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_tu(
@@ -103,7 +103,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_tu(
@@ -112,7 +112,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tu(
@@ -121,7 +121,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tu(
@@ -130,7 +130,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_tu(
@@ -139,7 +139,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_tu(
@@ -148,7 +148,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_tu(
@@ -157,7 +157,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_tu(
@@ -166,7 +166,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_tu(
@@ -175,7 +175,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_tu(
@@ -184,7 +184,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_tu(
@@ -193,7 +193,7 @@ vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_tu(
@@ -202,7 +202,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_tu(
@@ -211,7 +211,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_tu(
@@ -220,7 +220,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_tu(
@@ -229,7 +229,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_tu(
@@ -238,7 +238,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_tu(
@@ -247,7 +247,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_tu(
@@ -256,7 +256,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_tu(
@@ -265,7 +265,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_tu(
@@ -274,7 +274,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_tu(
@@ -283,7 +283,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_tu(
@@ -292,7 +292,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_tu(
@@ -301,7 +301,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_tu(
@@ -310,7 +310,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_tu(
@@ -319,7 +319,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_tu(
@@ -328,7 +328,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_tu(
@@ -337,7 +337,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_tu(
@@ -346,7 +346,7 @@ vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_tu(
@@ -355,7 +355,7 @@ vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_tu(
@@ -364,7 +364,7 @@ vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_tu(
@@ -373,7 +373,7 @@ vint8m2_t test_vrgather_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_tu(
@@ -382,7 +382,7 @@ vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_tu(
@@ -391,7 +391,7 @@ vint8m4_t test_vrgather_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_tu(
@@ -400,7 +400,7 @@ vint8m8_t test_vrgather_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_tu(
@@ -409,7 +409,7 @@ vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t in
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_tu(
@@ -418,7 +418,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_tu(
@@ -427,7 +427,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_tu(
@@ -436,7 +436,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_tu(
@@ -445,7 +445,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_tu(
@@ -454,7 +454,7 @@ vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_tu(
@@ -463,7 +463,7 @@ vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_tu(
@@ -472,7 +472,7 @@ vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_tu(
@@ -481,7 +481,7 @@ vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_tu(
@@ -490,7 +490,7 @@ vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_tu(
@@ -499,7 +499,7 @@ vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_tu(
@@ -508,7 +508,7 @@ vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tu(
@@ -517,7 +517,7 @@ vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tu(
@@ -526,7 +526,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_tu(
@@ -535,7 +535,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_tu(
@@ -544,7 +544,7 @@ vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_tu(
@@ -553,7 +553,7 @@ vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_tu(
@@ -562,7 +562,7 @@ vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_tu(
@@ -571,7 +571,7 @@ vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_tu(
@@ -580,7 +580,7 @@ vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_tu(
@@ -589,7 +589,7 @@ vint32m4_t test_vrgather_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_tu(
@@ -598,7 +598,7 @@ vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_tu(
@@ -607,7 +607,7 @@ vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_tu(
@@ -616,7 +616,7 @@ vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_tu(
@@ -625,7 +625,7 @@ vint64m1_t test_vrgather_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_tu(
@@ -634,7 +634,7 @@ vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_tu(
@@ -643,7 +643,7 @@ vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_tu(
@@ -652,7 +652,7 @@ vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_tu(
@@ -661,7 +661,7 @@ vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_tu(
@@ -670,7 +670,7 @@ vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_tu(
@@ -679,7 +679,7 @@ vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_tu(
@@ -688,7 +688,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_tu(
@@ -697,7 +697,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_tu(
@@ -706,7 +706,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_tu(
@@ -715,7 +715,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_tu(
@@ -724,7 +724,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_tu(
@@ -733,7 +733,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_tu(
@@ -742,7 +742,7 @@ vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_tu(
@@ -751,7 +751,7 @@ vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_tu(
@@ -760,7 +760,7 @@ vuint8m2_t test_vrgather_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_tu(
@@ -769,7 +769,7 @@ vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_tu(
@@ -778,7 +778,7 @@ vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_tu(
@@ -787,7 +787,7 @@ vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_tu(
@@ -796,7 +796,7 @@ vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_tu(
@@ -805,7 +805,7 @@ vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_tu(
@@ -814,7 +814,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_tu(
@@ -823,7 +823,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_tu(
@@ -832,7 +832,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_tu(
@@ -841,7 +841,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_tu(
@@ -850,7 +850,7 @@ vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_tu(
@@ -859,7 +859,7 @@ vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_tu(
@@ -868,7 +868,7 @@ vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_tu(
@@ -877,7 +877,7 @@ vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_tu(
@@ -886,7 +886,7 @@ vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_tu(
@@ -895,7 +895,7 @@ vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_tu(
@@ -904,7 +904,7 @@ vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tu(
@@ -913,7 +913,7 @@ vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tu(
@@ -922,7 +922,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_tu(
@@ -931,7 +931,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_tu(
@@ -940,7 +940,7 @@ vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_tu(
@@ -949,7 +949,7 @@ vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_tu(
@@ -958,7 +958,7 @@ vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_tu(
@@ -967,7 +967,7 @@ vuint32m2_t test_vrgather_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_tu(
@@ -976,7 +976,7 @@ vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_tu(
@@ -985,7 +985,7 @@ vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_tu(
@@ -994,7 +994,7 @@ vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_tu(
@@ -1003,7 +1003,7 @@ vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_tu(
@@ -1012,7 +1012,7 @@ vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_tu(
@@ -1021,7 +1021,7 @@ vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_tu(
@@ -1030,7 +1030,7 @@ vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_tu(
@@ -1039,7 +1039,7 @@ vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_tu(
@@ -1048,7 +1048,7 @@ vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_tu(
@@ -1057,7 +1057,7 @@ vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_tu(
@@ -1066,7 +1066,7 @@ vuint64m8_t test_vrgather_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
- return vrgather_tu(maskedoff, op1, index, vl);
+ return __riscv_vrgather_tu(maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_tum(
@@ -1075,7 +1075,7 @@ vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_tum(
@@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_tum(
@@ -1093,7 +1093,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_tum(
@@ -1102,7 +1102,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_tum(
@@ -1111,7 +1111,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_tum(
@@ -1120,7 +1120,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_tum(
@@ -1129,7 +1129,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_tum(
@@ -1138,7 +1138,7 @@ vfloat16m2_t test_vrgather_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_tum(
@@ -1147,7 +1147,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_tum(
@@ -1156,7 +1156,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_tum(
@@ -1165,7 +1165,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_tum(
@@ -1174,7 +1174,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tum(
@@ -1183,7 +1183,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tum(
@@ -1192,7 +1192,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_tum(
@@ -1201,7 +1201,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_tum(
@@ -1210,7 +1210,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_tum(
@@ -1219,7 +1219,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_tum(
@@ -1228,7 +1228,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_tum(
@@ -1237,7 +1237,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_tum(
@@ -1246,7 +1246,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_tum(
@@ -1255,7 +1255,7 @@ vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_tum(
@@ -1264,7 +1264,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_tum(
@@ -1273,7 +1273,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_tum(
@@ -1282,7 +1282,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_tum(
@@ -1291,7 +1291,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_tum(
@@ -1300,7 +1300,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_tum(
@@ -1309,7 +1309,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_tum(
@@ -1318,7 +1318,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_tum(
@@ -1327,7 +1327,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_tum(
@@ -1336,7 +1336,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_tum(
@@ -1345,7 +1345,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_tum(
@@ -1354,7 +1354,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_tum(
@@ -1363,7 +1363,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_tum(
@@ -1372,7 +1372,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_tum(
@@ -1381,7 +1381,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_tum(
@@ -1390,7 +1390,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_tum(
@@ -1399,7 +1399,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_tum(
@@ -1408,7 +1408,7 @@ vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_tum(
@@ -1417,7 +1417,7 @@ vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_tum(
@@ -1426,7 +1426,7 @@ vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_tum(
@@ -1435,7 +1435,7 @@ vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_tum(
@@ -1444,7 +1444,7 @@ vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_tum(
@@ -1453,7 +1453,7 @@ vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_tum(
@@ -1462,7 +1462,7 @@ vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_tum(
@@ -1471,7 +1471,7 @@ vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_tum(
@@ -1480,7 +1480,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_tum(
@@ -1489,7 +1489,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_tum(
@@ -1498,7 +1498,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_tum(
@@ -1507,7 +1507,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_tum(
@@ -1516,7 +1516,7 @@ vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_tum(
@@ -1525,7 +1525,7 @@ vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_tum(
@@ -1534,7 +1534,7 @@ vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_tum(
@@ -1543,7 +1543,7 @@ vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_tum(
@@ -1552,7 +1552,7 @@ vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_tum(
@@ -1561,7 +1561,7 @@ vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_tum(
@@ -1570,7 +1570,7 @@ vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tum(
@@ -1579,7 +1579,7 @@ vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tum(
@@ -1588,7 +1588,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_tum(
@@ -1597,7 +1597,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_tum(
@@ -1606,7 +1606,7 @@ vint32m1_t test_vrgather_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_tum(
@@ -1615,7 +1615,7 @@ vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_tum(
@@ -1624,7 +1624,7 @@ vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_tum(
@@ -1633,7 +1633,7 @@ vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_tum(
@@ -1642,7 +1642,7 @@ vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_tum(
@@ -1651,7 +1651,7 @@ vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_tum(
@@ -1660,7 +1660,7 @@ vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_tum(
@@ -1669,7 +1669,7 @@ vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_tum(
@@ -1678,7 +1678,7 @@ vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_tum(
@@ -1687,7 +1687,7 @@ vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_tum(
@@ -1696,7 +1696,7 @@ vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_tum(
@@ -1705,7 +1705,7 @@ vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_tum(
@@ -1714,7 +1714,7 @@ vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_tum(
@@ -1723,7 +1723,7 @@ vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_tum(
@@ -1732,7 +1732,7 @@ vint64m8_t test_vrgather_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_tum(
@@ -1741,7 +1741,7 @@ vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_tum(
@@ -1750,7 +1750,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_tum(
@@ -1759,7 +1759,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_tum(
@@ -1768,7 +1768,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_tum(
@@ -1777,7 +1777,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_tum(
@@ -1786,7 +1786,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_tum(
@@ -1795,7 +1795,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_tum(
@@ -1804,7 +1804,7 @@ vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_tum(
@@ -1813,7 +1813,7 @@ vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_tum(
@@ -1822,7 +1822,7 @@ vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_tum(
@@ -1831,7 +1831,7 @@ vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_tum(
@@ -1840,7 +1840,7 @@ vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_tum(
@@ -1849,7 +1849,7 @@ vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_tum(
@@ -1858,7 +1858,7 @@ vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_tum(
@@ -1867,7 +1867,7 @@ vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_tum(
@@ -1876,7 +1876,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_tum(
@@ -1885,7 +1885,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_tum(
@@ -1894,7 +1894,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_tum(
@@ -1903,7 +1903,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_tum(
@@ -1912,7 +1912,7 @@ vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_tum(
@@ -1921,7 +1921,7 @@ vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_tum(
@@ -1930,7 +1930,7 @@ vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_tum(
@@ -1939,7 +1939,7 @@ vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_tum(
@@ -1948,7 +1948,7 @@ vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_tum(
@@ -1957,7 +1957,7 @@ vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_tum(
@@ -1966,7 +1966,7 @@ vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tum(
@@ -1975,7 +1975,7 @@ vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tum(
@@ -1984,7 +1984,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_tum(
@@ -1993,7 +1993,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_tum(
@@ -2002,7 +2002,7 @@ vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_tum(
@@ -2011,7 +2011,7 @@ vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_tum(
@@ -2020,7 +2020,7 @@ vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_tum(
@@ -2029,7 +2029,7 @@ vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_tum(
@@ -2038,7 +2038,7 @@ vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_tum(
@@ -2047,7 +2047,7 @@ vuint32m4_t test_vrgather_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_tum(
@@ -2056,7 +2056,7 @@ vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_tum(
@@ -2065,7 +2065,7 @@ vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_tum(
@@ -2074,7 +2074,7 @@ vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_tum(
@@ -2083,7 +2083,7 @@ vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_tum(
@@ -2092,7 +2092,7 @@ vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_tum(
@@ -2101,7 +2101,7 @@ vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_tum(
@@ -2110,7 +2110,7 @@ vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_tum(
@@ -2119,7 +2119,7 @@ vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_tum(
@@ -2128,7 +2128,7 @@ vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
- return vrgather_tum(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_tumu(
@@ -2137,7 +2137,7 @@ vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_tumu(
@@ -2146,7 +2146,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_tumu(
@@ -2155,7 +2155,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_tumu(
@@ -2164,7 +2164,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_tumu(
@@ -2173,7 +2173,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_tumu(
@@ -2182,7 +2182,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_tumu(
@@ -2191,7 +2191,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_tumu(
@@ -2200,7 +2200,7 @@ vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_tumu(
@@ -2209,7 +2209,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_tumu(
@@ -2218,7 +2218,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_tumu(
@@ -2227,7 +2227,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_tumu(
@@ -2236,7 +2236,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tumu(
@@ -2245,7 +2245,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tumu(
@@ -2254,7 +2254,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_tumu(
@@ -2263,7 +2263,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_tumu(
@@ -2272,7 +2272,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_tumu(
@@ -2281,7 +2281,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_tumu(
@@ -2290,7 +2290,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_tumu(
@@ -2299,7 +2299,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_tumu(
@@ -2308,7 +2308,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_tumu(
@@ -2317,7 +2317,7 @@ vfloat32m4_t test_vrgather_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_tumu(
@@ -2326,7 +2326,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_tumu(
@@ -2335,7 +2335,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_tumu(
@@ -2344,7 +2344,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_tumu(
@@ -2353,7 +2353,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_tumu(
@@ -2362,7 +2362,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_tumu(
@@ -2371,7 +2371,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_tumu(
@@ -2380,7 +2380,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_tumu(
@@ -2389,7 +2389,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_tumu(
@@ -2398,7 +2398,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_tumu(
@@ -2407,7 +2407,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_tumu(
@@ -2416,7 +2416,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_tumu(
@@ -2425,7 +2425,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_tumu(
@@ -2434,7 +2434,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_tumu(
@@ -2443,7 +2443,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_tumu(
@@ -2452,7 +2452,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_tumu(
@@ -2461,7 +2461,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_tumu(
@@ -2470,7 +2470,7 @@ vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_tumu(
@@ -2479,7 +2479,7 @@ vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_tumu(
@@ -2488,7 +2488,7 @@ vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_tumu(
@@ -2497,7 +2497,7 @@ vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_tumu(
@@ -2506,7 +2506,7 @@ vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_tumu(
@@ -2515,7 +2515,7 @@ vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_tumu(
@@ -2524,7 +2524,7 @@ vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_tumu(
@@ -2533,7 +2533,7 @@ vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_tumu(
@@ -2542,7 +2542,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_tumu(
@@ -2551,7 +2551,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_tumu(
@@ -2560,7 +2560,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_tumu(
@@ -2569,7 +2569,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_tumu(
@@ -2578,7 +2578,7 @@ vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_tumu(
@@ -2587,7 +2587,7 @@ vint16m1_t test_vrgather_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_tumu(
@@ -2596,7 +2596,7 @@ vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_tumu(
@@ -2605,7 +2605,7 @@ vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_tumu(
@@ -2614,7 +2614,7 @@ vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_tumu(
@@ -2623,7 +2623,7 @@ vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_tumu(
@@ -2632,7 +2632,7 @@ vint16m8_t test_vrgather_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tumu(
@@ -2641,7 +2641,7 @@ vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tumu(
@@ -2650,7 +2650,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_tumu(
@@ -2659,7 +2659,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_tumu(
@@ -2668,7 +2668,7 @@ vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_tumu(
@@ -2677,7 +2677,7 @@ vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_tumu(
@@ -2686,7 +2686,7 @@ vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_tumu(
@@ -2695,7 +2695,7 @@ vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_tumu(
@@ -2704,7 +2704,7 @@ vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_tumu(
@@ -2713,7 +2713,7 @@ vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_tumu(
@@ -2722,7 +2722,7 @@ vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_tumu(
@@ -2731,7 +2731,7 @@ vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_tumu(
@@ -2740,7 +2740,7 @@ vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_tumu(
@@ -2749,7 +2749,7 @@ vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_tumu(
@@ -2758,7 +2758,7 @@ vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_tumu(
@@ -2767,7 +2767,7 @@ vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_tumu(
@@ -2776,7 +2776,7 @@ vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_tumu(
@@ -2785,7 +2785,7 @@ vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_tumu(
@@ -2794,7 +2794,7 @@ vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_tumu(
@@ -2803,7 +2803,7 @@ vint64m8_t test_vrgather_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_tumu(
@@ -2812,7 +2812,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_tumu(
@@ -2821,7 +2821,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_tumu(
@@ -2830,7 +2830,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_tumu(
@@ -2839,7 +2839,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_tumu(
@@ -2848,7 +2848,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_tumu(
@@ -2857,7 +2857,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_tumu(
@@ -2866,7 +2866,7 @@ vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_tumu(
@@ -2875,7 +2875,7 @@ vuint8m1_t test_vrgather_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_tumu(
@@ -2884,7 +2884,7 @@ vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_tumu(
@@ -2893,7 +2893,7 @@ vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_tumu(
@@ -2902,7 +2902,7 @@ vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_tumu(
@@ -2911,7 +2911,7 @@ vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_tumu(
@@ -2920,7 +2920,7 @@ vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_tumu(
@@ -2929,7 +2929,7 @@ vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_tumu(
@@ -2938,7 +2938,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_tumu(
@@ -2947,7 +2947,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_tumu(
@@ -2956,7 +2956,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_tumu(
@@ -2965,7 +2965,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_tumu(
@@ -2974,7 +2974,7 @@ vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_tumu(
@@ -2983,7 +2983,7 @@ vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_tumu(
@@ -2992,7 +2992,7 @@ vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_tumu(
@@ -3001,7 +3001,7 @@ vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_tumu(
@@ -3010,7 +3010,7 @@ vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_tumu(
@@ -3019,7 +3019,7 @@ vuint16m4_t test_vrgather_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_tumu(
@@ -3028,7 +3028,7 @@ vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tumu(
@@ -3037,7 +3037,7 @@ vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tumu(
@@ -3046,7 +3046,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_tumu(
@@ -3055,7 +3055,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_tumu(
@@ -3064,7 +3064,7 @@ vuint32m1_t test_vrgather_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_tumu(
@@ -3073,7 +3073,7 @@ vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_tumu(
@@ -3082,7 +3082,7 @@ vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_tumu(
@@ -3091,7 +3091,7 @@ vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_tumu(
@@ -3100,7 +3100,7 @@ vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_tumu(
@@ -3109,7 +3109,7 @@ vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_tumu(
@@ -3118,7 +3118,7 @@ vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_tumu(
@@ -3127,7 +3127,7 @@ vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_tumu(
@@ -3136,7 +3136,7 @@ vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_tumu(
@@ -3145,7 +3145,7 @@ vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_tumu(
@@ -3154,7 +3154,7 @@ vuint64m2_t test_vrgather_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_tumu(
@@ -3163,7 +3163,7 @@ vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_tumu(
@@ -3172,7 +3172,7 @@ vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_tumu(
@@ -3181,7 +3181,7 @@ vuint64m4_t test_vrgather_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_tumu(
@@ -3190,7 +3190,7 @@ vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
- return vrgather_tumu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_mu(
@@ -3199,7 +3199,7 @@ vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_mu(
@@ -3208,7 +3208,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_mu(
@@ -3217,7 +3217,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_mu(
@@ -3226,7 +3226,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgather_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_mu(
@@ -3235,7 +3235,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_mu(
@@ -3244,7 +3244,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_mu(
@@ -3253,7 +3253,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_mu(
@@ -3262,7 +3262,7 @@ vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_mu(
@@ -3271,7 +3271,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_mu(
@@ -3280,7 +3280,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_mu(
@@ -3289,7 +3289,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_mu(
@@ -3298,7 +3298,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_mu(
@@ -3307,7 +3307,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_mu(
@@ -3316,7 +3316,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_mu(
@@ -3325,7 +3325,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_mu(
@@ -3334,7 +3334,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_mu(
@@ -3343,7 +3343,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_mu(
@@ -3352,7 +3352,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_mu(
@@ -3361,7 +3361,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_mu(
@@ -3370,7 +3370,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_mu(
@@ -3379,7 +3379,7 @@ vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_mu(
@@ -3388,7 +3388,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_mu(
@@ -3397,7 +3397,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_mu(
@@ -3406,7 +3406,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_mu(
@@ -3415,7 +3415,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_mu(
@@ -3424,7 +3424,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_mu(
@@ -3433,7 +3433,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_mu(
@@ -3442,7 +3442,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_mu(
@@ -3451,7 +3451,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_mu(
@@ -3460,7 +3460,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_mu(
@@ -3469,7 +3469,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_mu(
@@ -3478,7 +3478,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_mu(
@@ -3487,7 +3487,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_mu(
@@ -3496,7 +3496,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_mu(
@@ -3505,7 +3505,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_mu(
@@ -3514,7 +3514,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgather_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_mu(
@@ -3523,7 +3523,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_mu(
@@ -3532,7 +3532,7 @@ vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_mu(
@@ -3541,7 +3541,7 @@ vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_mu(
@@ -3550,7 +3550,7 @@ vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_mu(
@@ -3559,7 +3559,7 @@ vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_mu(
@@ -3568,7 +3568,7 @@ vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_mu(
@@ -3577,7 +3577,7 @@ vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_mu(
@@ -3586,7 +3586,7 @@ vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_mu(
@@ -3595,7 +3595,7 @@ vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_mu(
@@ -3604,7 +3604,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_mu(
@@ -3613,7 +3613,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_mu(
@@ -3622,7 +3622,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_mu(
@@ -3631,7 +3631,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_mu(
@@ -3640,7 +3640,7 @@ vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_mu(
@@ -3649,7 +3649,7 @@ vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_mu(
@@ -3658,7 +3658,7 @@ vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_mu(
@@ -3667,7 +3667,7 @@ vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_mu(
@@ -3676,7 +3676,7 @@ vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_mu(
@@ -3685,7 +3685,7 @@ vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_mu(
@@ -3694,7 +3694,7 @@ vint16m8_t test_vrgather_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_mu(
@@ -3703,7 +3703,7 @@ vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_mu(
@@ -3712,7 +3712,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_mu(
@@ -3721,7 +3721,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_mu(
@@ -3730,7 +3730,7 @@ vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_mu(
@@ -3739,7 +3739,7 @@ vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_mu(
@@ -3748,7 +3748,7 @@ vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_mu(
@@ -3757,7 +3757,7 @@ vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_mu(
@@ -3766,7 +3766,7 @@ vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_mu(
@@ -3775,7 +3775,7 @@ vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_mu(
@@ -3784,7 +3784,7 @@ vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_mu(
@@ -3793,7 +3793,7 @@ vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_mu(
@@ -3802,7 +3802,7 @@ vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_mu(
@@ -3811,7 +3811,7 @@ vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_mu(
@@ -3820,7 +3820,7 @@ vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_mu(
@@ -3829,7 +3829,7 @@ vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_mu(
@@ -3838,7 +3838,7 @@ vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_mu(
@@ -3847,7 +3847,7 @@ vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_mu(
@@ -3856,7 +3856,7 @@ vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_mu(
@@ -3865,7 +3865,7 @@ vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_mu(
@@ -3874,7 +3874,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_mu(
@@ -3883,7 +3883,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_mu(
@@ -3892,7 +3892,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_mu(
@@ -3901,7 +3901,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_mu(
@@ -3910,7 +3910,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_mu(
@@ -3919,7 +3919,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_mu(
@@ -3928,7 +3928,7 @@ vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_mu(
@@ -3937,7 +3937,7 @@ vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_mu(
@@ -3946,7 +3946,7 @@ vuint8m2_t test_vrgather_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_mu(
@@ -3955,7 +3955,7 @@ vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_mu(
@@ -3964,7 +3964,7 @@ vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_mu(
@@ -3973,7 +3973,7 @@ vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_mu(
@@ -3982,7 +3982,7 @@ vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_mu(
@@ -3991,7 +3991,7 @@ vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_mu(
@@ -4000,7 +4000,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_mu(
@@ -4009,7 +4009,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_mu(
@@ -4018,7 +4018,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_mu(
@@ -4027,7 +4027,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_mu(
@@ -4036,7 +4036,7 @@ vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgather_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_mu(
@@ -4045,7 +4045,7 @@ vuint16m1_t test_vrgather_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_mu(
@@ -4054,7 +4054,7 @@ vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_mu(
@@ -4063,7 +4063,7 @@ vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_mu(
@@ -4072,7 +4072,7 @@ vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_mu(
@@ -4081,7 +4081,7 @@ vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_mu(
@@ -4090,7 +4090,7 @@ vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_mu(
@@ -4099,7 +4099,7 @@ vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_mu(
@@ -4108,7 +4108,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_mu(
@@ -4117,7 +4117,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_mu(
@@ -4126,7 +4126,7 @@ vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_mu(
@@ -4135,7 +4135,7 @@ vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_mu(
@@ -4144,7 +4144,7 @@ vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_mu(
@@ -4153,7 +4153,7 @@ vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_mu(
@@ -4162,7 +4162,7 @@ vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_mu(
@@ -4171,7 +4171,7 @@ vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_mu(
@@ -4180,7 +4180,7 @@ vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_mu(
@@ -4189,7 +4189,7 @@ vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_mu(
@@ -4198,7 +4198,7 @@ vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_mu(
@@ -4207,7 +4207,7 @@ vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_mu(
@@ -4216,7 +4216,7 @@ vuint64m2_t test_vrgather_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgather_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_mu(
@@ -4225,7 +4225,7 @@ vuint64m2_t test_vrgather_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_mu(
@@ -4234,7 +4234,7 @@ vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_mu(
@@ -4243,7 +4243,7 @@ vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_mu(
@@ -4252,6 +4252,6 @@ vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
- return vrgather_mu(mask, maskedoff, op1, index, vl);
+ return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgatherei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgatherei16.c
index d427e12acc70..8e2e388d1628 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgatherei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgatherei16.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_tu(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_tu(
@@ -157,7 +157,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_tu(
@@ -166,7 +166,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_tu(
@@ -175,7 +175,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgatherei16_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_tu(
@@ -184,7 +184,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_tu(
@@ -193,7 +193,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_tu(
@@ -202,7 +202,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_tu(
@@ -211,7 +211,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_tu(
@@ -220,7 +220,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_tu(
@@ -229,7 +229,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_tu(
@@ -238,7 +238,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_tu(
@@ -247,7 +247,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tu(
@@ -256,7 +256,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_tu(
@@ -265,7 +265,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_tu(
@@ -274,7 +274,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_tu(
@@ -283,7 +283,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_tu(
@@ -292,7 +292,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_tu(
@@ -301,7 +301,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_tu(
@@ -310,7 +310,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_tu(
@@ -319,7 +319,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_tu(
@@ -328,7 +328,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgatherei16_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_tu(
@@ -337,7 +337,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_tu(
@@ -346,7 +346,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_tu(
@@ -355,7 +355,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_tu(
@@ -364,7 +364,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_tu(
@@ -373,7 +373,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_tu(
@@ -382,7 +382,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_tu(
@@ -391,7 +391,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_tu(
@@ -400,7 +400,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_tu(
@@ -409,7 +409,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_tu(
@@ -418,7 +418,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_tu(
@@ -427,7 +427,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_tu(
@@ -436,7 +436,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tu(
@@ -445,7 +445,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_tu(
@@ -454,7 +454,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_tu(
@@ -463,7 +463,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_tu(
@@ -472,7 +472,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_tu(
@@ -481,7 +481,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_tu(
@@ -490,7 +490,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_tu(
@@ -499,7 +499,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_tu(
@@ -508,7 +508,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_tu(
@@ -517,7 +517,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_tum(
@@ -526,7 +526,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_tum(
@@ -535,7 +535,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_tum(
@@ -544,7 +544,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_tum(
@@ -553,7 +553,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_tum(
@@ -562,7 +562,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_tum(
@@ -571,7 +571,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tum(
@@ -580,7 +580,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_tum(
@@ -589,7 +589,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t mask
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_tum(
@@ -598,7 +598,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_tum(
@@ -607,7 +607,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_tum(
@@ -616,7 +616,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_tum(
@@ -625,7 +625,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgatherei16_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_tum(
@@ -634,7 +634,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_tum(
@@ -643,7 +643,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_tum(
@@ -652,7 +652,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_tum(
@@ -661,7 +661,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_tum(
@@ -670,7 +670,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_tum(
@@ -679,7 +679,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_tum(
@@ -688,7 +688,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgatherei16_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_tum(
@@ -697,7 +697,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_tum(
@@ -706,7 +706,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_tum(
@@ -715,7 +715,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_tum(
@@ -724,7 +724,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgatherei16_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_tum(
@@ -733,7 +733,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgatherei16_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_tum(
@@ -742,7 +742,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_tum(
@@ -751,7 +751,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_tum(
@@ -760,7 +760,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tum(
@@ -769,7 +769,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_tum(
@@ -778,7 +778,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_tum(
@@ -787,7 +787,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_tum(
@@ -796,7 +796,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_tum(
@@ -805,7 +805,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_tum(
@@ -814,7 +814,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_tum(
@@ -823,7 +823,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_tum(
@@ -832,7 +832,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_tum(
@@ -841,7 +841,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_tum(
@@ -850,7 +850,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_tum(
@@ -859,7 +859,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_tum(
@@ -868,7 +868,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_tum(
@@ -877,7 +877,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_tum(
@@ -886,7 +886,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_tum(
@@ -895,7 +895,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_tum(
@@ -904,7 +904,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_tum(
@@ -913,7 +913,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_tum(
@@ -922,7 +922,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_tum(
@@ -931,7 +931,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgatherei16_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_tum(
@@ -940,7 +940,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_tum(
@@ -949,7 +949,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tum(
@@ -958,7 +958,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_tum(
@@ -967,7 +967,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_tum(
@@ -976,7 +976,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_tum(
@@ -985,7 +985,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_tum(
@@ -994,7 +994,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_tum(
@@ -1003,7 +1003,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_tum(
@@ -1012,7 +1012,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_tum(
@@ -1021,7 +1021,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_tum(
@@ -1030,7 +1030,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_tumu(
@@ -1039,7 +1039,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_tumu(
@@ -1048,7 +1048,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_tumu(
@@ -1057,7 +1057,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t mas
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_tumu(
@@ -1066,7 +1066,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_tumu(
@@ -1075,7 +1075,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_tumu(
@@ -1084,7 +1084,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tumu(
@@ -1093,7 +1093,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_tumu(
@@ -1102,7 +1102,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t mas
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_tumu(
@@ -1111,7 +1111,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgatherei16_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_tumu(
@@ -1120,7 +1120,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_tumu(
@@ -1129,7 +1129,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_tumu(
@@ -1138,7 +1138,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_tumu(
@@ -1147,7 +1147,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_tumu(
@@ -1156,7 +1156,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_tumu(
@@ -1165,7 +1165,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t masked
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_tumu(
@@ -1174,7 +1174,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_tumu(
@@ -1183,7 +1183,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_tumu(
@@ -1192,7 +1192,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_tumu(
@@ -1201,7 +1201,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_tumu(
@@ -1210,7 +1210,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_tumu(
@@ -1219,7 +1219,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_tumu(
@@ -1228,7 +1228,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_tumu(
@@ -1237,7 +1237,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_tumu(
@@ -1246,7 +1246,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_tumu(
@@ -1255,7 +1255,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_tumu(
@@ -1264,7 +1264,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgatherei16_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_tumu(
@@ -1273,7 +1273,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tumu(
@@ -1282,7 +1282,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_tumu(
@@ -1291,7 +1291,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_tumu(
@@ -1300,7 +1300,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_tumu(
@@ -1309,7 +1309,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgatherei16_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_tumu(
@@ -1318,7 +1318,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_tumu(
@@ -1327,7 +1327,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_tumu(
@@ -1336,7 +1336,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_tumu(
@@ -1345,7 +1345,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_tumu(
@@ -1354,7 +1354,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_tumu(
@@ -1363,7 +1363,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_tumu(
@@ -1372,7 +1372,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_tumu(
@@ -1381,7 +1381,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_tumu(
@@ -1390,7 +1390,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_tumu(
@@ -1399,7 +1399,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_tumu(
@@ -1408,7 +1408,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_tumu(
@@ -1417,7 +1417,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgatherei16_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_tumu(
@@ -1426,7 +1426,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_tumu(
@@ -1435,7 +1435,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_tumu(
@@ -1444,7 +1444,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_tumu(
@@ -1453,7 +1453,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_tumu(
@@ -1462,7 +1462,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tumu(
@@ -1471,7 +1471,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_tumu(
@@ -1480,7 +1480,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_tumu(
@@ -1489,7 +1489,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_tumu(
@@ -1498,7 +1498,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_tumu(
@@ -1507,7 +1507,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_tumu(
@@ -1516,7 +1516,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_tumu(
@@ -1525,7 +1525,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_tumu(
@@ -1534,7 +1534,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_tumu(
@@ -1543,7 +1543,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_mu(
@@ -1552,7 +1552,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_mu(
@@ -1561,7 +1561,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_mu(
@@ -1570,7 +1570,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_mu(
@@ -1579,7 +1579,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_mu(
@@ -1588,7 +1588,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_mu(
@@ -1597,7 +1597,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_mu(
@@ -1606,7 +1606,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_mu(
@@ -1615,7 +1615,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_mu(
@@ -1624,7 +1624,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_mu(
@@ -1633,7 +1633,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_mu(
@@ -1642,7 +1642,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vrgatherei16_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_mu(
@@ -1651,7 +1651,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_mu(
@@ -1660,7 +1660,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_mu(
@@ -1669,7 +1669,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_mu(
@@ -1678,7 +1678,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_mu(
@@ -1687,7 +1687,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_mu(
@@ -1696,7 +1696,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_mu(
@@ -1705,7 +1705,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_mu(
@@ -1714,7 +1714,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_mu(
@@ -1723,7 +1723,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_mu(
@@ -1732,7 +1732,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_mu(
@@ -1741,7 +1741,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_mu(
@@ -1750,7 +1750,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrgatherei16_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_mu(
@@ -1759,7 +1759,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_mu(
@@ -1768,7 +1768,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_mu(
@@ -1777,7 +1777,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_mu(
@@ -1786,7 +1786,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrgatherei16_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_mu(
@@ -1795,7 +1795,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_mu(
@@ -1804,7 +1804,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_mu(
@@ -1813,7 +1813,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_mu(
@@ -1822,7 +1822,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_mu(
@@ -1831,7 +1831,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_mu(
@@ -1840,7 +1840,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_mu(
@@ -1849,7 +1849,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_mu(
@@ -1858,7 +1858,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_mu(
@@ -1867,7 +1867,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_mu(
@@ -1876,7 +1876,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_mu(
@@ -1885,7 +1885,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_mu(
@@ -1894,7 +1894,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrgatherei16_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_mu(
@@ -1903,7 +1903,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_mu(
@@ -1912,7 +1912,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_mu(
@@ -1921,7 +1921,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_mu(
@@ -1930,7 +1930,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_mu(
@@ -1939,7 +1939,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_mu(
@@ -1948,7 +1948,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_mu(
@@ -1957,7 +1957,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_mu(
@@ -1966,7 +1966,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_mu(
@@ -1975,7 +1975,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_mu(
@@ -1984,7 +1984,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_mu(
@@ -1993,7 +1993,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_mu(
@@ -2002,7 +2002,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_mu(
@@ -2011,7 +2011,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_mu(
@@ -2020,7 +2020,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_mu(
@@ -2029,7 +2029,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_mu(
@@ -2038,7 +2038,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_mu(
@@ -2047,7 +2047,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_mu(
@@ -2056,6 +2056,6 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrgatherei16_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrsub.c
index f6678d0dd92c..248e82695001 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrsub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_tu(
@@ -30,7 +30,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_tu(
@@ -39,7 +39,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_tu(
@@ -57,7 +57,7 @@ vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_tu(
@@ -66,7 +66,7 @@ vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_tu(
@@ -75,7 +75,7 @@ vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_tu(
@@ -84,7 +84,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_tu(
@@ -93,7 +93,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_tu(
@@ -111,7 +111,7 @@ vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_tu(
@@ -120,7 +120,7 @@ vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tu(
@@ -129,7 +129,7 @@ vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_tu(
@@ -138,7 +138,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_tu(
@@ -156,7 +156,7 @@ vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_tu(
@@ -165,7 +165,7 @@ vint32m4_t test_vrsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_tu(
@@ -192,7 +192,7 @@ vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_tu(
@@ -201,7 +201,7 @@ vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_tu(
@@ -210,7 +210,7 @@ vint64m8_t test_vrsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_tu(
@@ -219,7 +219,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_tu(
@@ -228,7 +228,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_tu(
@@ -237,7 +237,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_tu(
@@ -246,7 +246,7 @@ vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_tu(
@@ -255,7 +255,7 @@ vuint8m2_t test_vrsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_tu(
@@ -264,7 +264,7 @@ vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_tu(
@@ -273,7 +273,7 @@ vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_tu(
@@ -282,7 +282,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_tu(
@@ -291,7 +291,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_tu(
@@ -300,7 +300,7 @@ vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_tu(
@@ -309,7 +309,7 @@ vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_tu(
@@ -318,7 +318,7 @@ vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tu(
@@ -327,7 +327,7 @@ vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_tu(
@@ -336,7 +336,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_tu(
@@ -345,7 +345,7 @@ vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_tu(
@@ -354,7 +354,7 @@ vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_tu(
@@ -363,7 +363,7 @@ vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_tu(
@@ -372,7 +372,7 @@ vuint32m8_t test_vrsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_tu(
@@ -381,7 +381,7 @@ vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_tu(
@@ -390,7 +390,7 @@ vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vrsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_tum(
@@ -426,7 +426,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_tum(
@@ -435,7 +435,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_tum(
@@ -444,7 +444,7 @@ vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_tum(
@@ -453,7 +453,7 @@ vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_tum(
@@ -462,7 +462,7 @@ vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_tum(
@@ -471,7 +471,7 @@ vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_tum(
@@ -480,7 +480,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_tum(
@@ -489,7 +489,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_tum(
@@ -498,7 +498,7 @@ vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_tum(
@@ -507,7 +507,7 @@ vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_tum(
@@ -516,7 +516,7 @@ vint16m4_t test_vrsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tum(
@@ -525,7 +525,7 @@ vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_tum(
@@ -534,7 +534,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_tum(
@@ -543,7 +543,7 @@ vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_tum(
@@ -552,7 +552,7 @@ vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_tum(
@@ -561,7 +561,7 @@ vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_tum(
@@ -570,7 +570,7 @@ vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_tum(
@@ -579,7 +579,7 @@ vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_tum(
@@ -588,7 +588,7 @@ vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_tum(
@@ -597,7 +597,7 @@ vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_tum(
@@ -606,7 +606,7 @@ vint64m8_t test_vrsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_tum(
@@ -615,7 +615,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_tum(
@@ -624,7 +624,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_tum(
@@ -633,7 +633,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_tum(
@@ -642,7 +642,7 @@ vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_tum(
@@ -651,7 +651,7 @@ vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_tum(
@@ -660,7 +660,7 @@ vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_tum(
@@ -669,7 +669,7 @@ vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_tum(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_tum(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_tum(
@@ -696,7 +696,7 @@ vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_tum(
@@ -705,7 +705,7 @@ vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_tum(
@@ -714,7 +714,7 @@ vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tum(
@@ -723,7 +723,7 @@ vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_tum(
@@ -732,7 +732,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_tum(
@@ -741,7 +741,7 @@ vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_tum(
@@ -750,7 +750,7 @@ vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_tum(
@@ -759,7 +759,7 @@ vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_tum(
@@ -768,7 +768,7 @@ vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_tum(
@@ -777,7 +777,7 @@ vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_tum(
@@ -786,7 +786,7 @@ vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vrsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_tumu(
@@ -822,7 +822,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_tumu(
@@ -831,7 +831,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_tumu(
@@ -840,7 +840,7 @@ vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_tumu(
@@ -849,7 +849,7 @@ vint8m2_t test_vrsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_tumu(
@@ -858,7 +858,7 @@ vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_tumu(
@@ -867,7 +867,7 @@ vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_tumu(
@@ -876,7 +876,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_tumu(
@@ -885,7 +885,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_tumu(
@@ -894,7 +894,7 @@ vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_tumu(
@@ -903,7 +903,7 @@ vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_tumu(
@@ -912,7 +912,7 @@ vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tumu(
@@ -921,7 +921,7 @@ vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_tumu(
@@ -930,7 +930,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_tumu(
@@ -939,7 +939,7 @@ vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_tumu(
@@ -948,7 +948,7 @@ vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_tumu(
@@ -957,7 +957,7 @@ vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_tumu(
@@ -966,7 +966,7 @@ vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_tumu(
@@ -975,7 +975,7 @@ vint64m1_t test_vrsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_tumu(
@@ -984,7 +984,7 @@ vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_tumu(
@@ -993,7 +993,7 @@ vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_tumu(
@@ -1002,7 +1002,7 @@ vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_tumu(
@@ -1011,7 +1011,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_tumu(
@@ -1020,7 +1020,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_tumu(
@@ -1029,7 +1029,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_tumu(
@@ -1038,7 +1038,7 @@ vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_tumu(
@@ -1047,7 +1047,7 @@ vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_tumu(
@@ -1056,7 +1056,7 @@ vuint8m4_t test_vrsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_tumu(
@@ -1065,7 +1065,7 @@ vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_tumu(
@@ -1074,7 +1074,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_tumu(
@@ -1083,7 +1083,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_tumu(
@@ -1092,7 +1092,7 @@ vuint16m1_t test_vrsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_tumu(
@@ -1101,7 +1101,7 @@ vuint16m2_t test_vrsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_tumu(
@@ -1110,7 +1110,7 @@ vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tumu(
@@ -1119,7 +1119,7 @@ vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_tumu(
@@ -1137,7 +1137,7 @@ vuint32m1_t test_vrsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_tumu(
@@ -1146,7 +1146,7 @@ vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_tumu(
@@ -1155,7 +1155,7 @@ vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_tumu(
@@ -1164,7 +1164,7 @@ vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_tumu(
@@ -1173,7 +1173,7 @@ vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_tumu(
@@ -1182,7 +1182,7 @@ vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vrsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vrsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_mu(
@@ -1218,7 +1218,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_mu(
@@ -1227,7 +1227,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_mu(
@@ -1236,7 +1236,7 @@ vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_mu(
@@ -1245,7 +1245,7 @@ vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_mu(
@@ -1254,7 +1254,7 @@ vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_mu(
@@ -1263,7 +1263,7 @@ vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_mu(
@@ -1272,7 +1272,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vrsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_mu(
@@ -1281,7 +1281,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_mu(
@@ -1290,7 +1290,7 @@ vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_mu(
@@ -1299,7 +1299,7 @@ vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_mu(
@@ -1308,7 +1308,7 @@ vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_mu(
@@ -1317,7 +1317,7 @@ vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_mu(
@@ -1326,7 +1326,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_mu(
@@ -1335,7 +1335,7 @@ vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_mu(
@@ -1344,7 +1344,7 @@ vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_mu(
@@ -1353,7 +1353,7 @@ vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_mu(
@@ -1362,7 +1362,7 @@ vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_mu(
@@ -1371,7 +1371,7 @@ vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_mu(
@@ -1380,7 +1380,7 @@ vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_mu(
@@ -1389,7 +1389,7 @@ vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_mu(
@@ -1398,7 +1398,7 @@ vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_mu(
@@ -1407,7 +1407,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_mu(
@@ -1416,7 +1416,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_mu(
@@ -1425,7 +1425,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_mu(
@@ -1434,7 +1434,7 @@ vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_mu(
@@ -1443,7 +1443,7 @@ vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_mu(
@@ -1452,7 +1452,7 @@ vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_mu(
@@ -1461,7 +1461,7 @@ vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_mu(
@@ -1470,7 +1470,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_mu(
@@ -1479,7 +1479,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_mu(
@@ -1488,7 +1488,7 @@ vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_mu(
@@ -1497,7 +1497,7 @@ vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_mu(
@@ -1506,7 +1506,7 @@ vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_mu(
@@ -1515,7 +1515,7 @@ vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_mu(
@@ -1524,7 +1524,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_mu(
@@ -1533,7 +1533,7 @@ vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_mu(
@@ -1542,7 +1542,7 @@ vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_mu(
@@ -1551,7 +1551,7 @@ vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_mu(
@@ -1560,7 +1560,7 @@ vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_mu(
@@ -1569,7 +1569,7 @@ vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_mu(
@@ -1578,7 +1578,7 @@ vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vrsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
}
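A minimal caller sketch for the renamed overloaded intrinsic exercised above (an illustration under stated assumptions, not part of the patch): vrsub is reverse subtraction, so for active elements result[i] = op2 - op1[i], and the _tum variant is the tail-undisturbed, masked form that takes the mask and a maskedoff operand, exactly as in the tests. The helper name rsub_from_scalar is hypothetical, and the sketch assumes a toolchain whose <riscv_vector.h> already exposes the __riscv_-prefixed overloads.

#include <riscv_vector.h>

// Hypothetical helper: reverse-subtract each active element of v from the
// scalar x (result[i] = x - v[i]), with undisturbed elements supplied by
// dest, using the overloaded tail-undisturbed masked (_tum) intrinsic.
vint32m1_t rsub_from_scalar(vbool32_t mask, vint32m1_t dest,
                            vint32m1_t v, int32_t x, size_t vl) {
  return __riscv_vrsub_tum(mask, dest, v, x, vl);
}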
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsadd.c
index e292e4570630..3b37a3c128b2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsadd.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsadd_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsadd_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsadd_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsadd_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsaddu.c
index e43e121591d2..70dc4a527f8e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsaddu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsaddu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsaddu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsaddu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsbc.c
index 160840980dfd..b9972aad8cb3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsbc.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsbc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsbc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsbc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vsbc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8_tu(
@@ -408,7 +408,7 @@ vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8_tu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4_tu(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4_tu(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2_tu(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2_tu(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1_tu(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1_tu(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2_tu(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2_tu(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsbc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4_tu(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsbc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4_tu(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8_tu(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsbc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8_tu(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsbc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4_tu(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4_tu(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2_tu(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2_tu(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1_tu(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1_tu(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2_tu(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2_tu(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsbc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4_tu(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsbc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4_tu(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8_tu(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8_tu(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2_tu(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2_tu(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1_tu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1_tu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2_tu(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsbc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2_tu(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsbc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4_tu(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4_tu(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8_tu(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsbc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8_tu(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsbc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1_tu(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1_tu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2_tu(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2_tu(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsbc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4_tu(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsbc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4_tu(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8_tu(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8_tu(
@@ -795,6 +795,6 @@ vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsbc_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) {
- return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+ return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}
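The vsbc.c hunks above are purely mechanical: each call site keeps its argument order, types, and policy suffix, and only gains the __riscv_ prefix. A minimal caller-side sketch, not part of this patch (the wrapper name sub_with_borrow_tu and building with Clang's RVV support, e.g. -march=rv64gcv, are illustrative assumptions; the intrinsic signature matches the u32m1 tests above):

#include <riscv_vector.h>

// Tail-undisturbed subtract-with-borrow through the overloaded intrinsic.
// The only change this patch requires at a call site is the new prefix.
vuint32m1_t sub_with_borrow_tu(vuint32m1_t maskedoff, vuint32m1_t op1,
                               vuint32m1_t op2, vbool32_t borrowin, size_t vl) {
  // Before this patch: return vsbc_tu(maskedoff, op1, op2, borrowin, vl);
  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
}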
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext.c
index ebfdee9f7056..1443ec8c44c3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_tu(
@@ -21,7 +21,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_tu(
@@ -30,7 +30,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_tu(
@@ -39,7 +39,7 @@ vint16m1_t test_vsext_vf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_tu(
@@ -48,7 +48,7 @@ vint16m2_t test_vsext_vf2_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_tu(
@@ -57,7 +57,7 @@ vint16m4_t test_vsext_vf2_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tu(
@@ -66,7 +66,7 @@ vint16m8_t test_vsext_vf2_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tu(
@@ -75,7 +75,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tu(
@@ -84,7 +84,7 @@ vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tu(
@@ -93,7 +93,7 @@ vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tu(
@@ -102,7 +102,7 @@ vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
- return vsext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tu(
@@ -111,7 +111,7 @@ vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf8_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tu(
@@ -120,7 +120,7 @@ vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf8_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tu(
@@ -129,7 +129,7 @@ vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf8_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tu(
@@ -138,7 +138,7 @@ vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf8_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tu(
@@ -147,7 +147,7 @@ vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_tu(
@@ -156,7 +156,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_tu(
@@ -165,7 +165,7 @@ vint32m1_t test_vsext_vf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_tu(
@@ -174,7 +174,7 @@ vint32m2_t test_vsext_vf2_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_tu(
@@ -183,7 +183,7 @@ vint32m4_t test_vsext_vf2_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tu(
@@ -192,7 +192,7 @@ vint32m8_t test_vsext_vf2_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vsext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tu(
@@ -201,7 +201,7 @@ vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vsext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tu(
@@ -210,7 +210,7 @@ vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
- return vsext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tu(
@@ -219,7 +219,7 @@ vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
- return vsext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tu(
@@ -228,7 +228,7 @@ vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_tu(
@@ -237,7 +237,7 @@ vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_tu(
@@ -246,7 +246,7 @@ vint64m2_t test_vsext_vf2_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_tu(
@@ -255,7 +255,7 @@ vint64m4_t test_vsext_vf2_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
- return vsext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_tum(
@@ -264,7 +264,7 @@ vint64m8_t test_vsext_vf2_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_tum(
@@ -273,7 +273,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_tum(
@@ -282,7 +282,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_tum(
@@ -291,7 +291,7 @@ vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_tum(
@@ -300,7 +300,7 @@ vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_tum(
@@ -309,7 +309,7 @@ vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tum(
@@ -318,7 +318,7 @@ vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tum(
@@ -327,7 +327,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tum(
@@ -336,7 +336,7 @@ vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tum(
@@ -345,7 +345,7 @@ vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tum(
@@ -354,7 +354,7 @@ vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
- return vsext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tum(
@@ -363,7 +363,7 @@ vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf8_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tum(
@@ -372,7 +372,7 @@ vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf8_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tum(
@@ -381,7 +381,7 @@ vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf8_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tum(
@@ -390,7 +390,7 @@ vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf8_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tum(
@@ -399,7 +399,7 @@ vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_tum(
@@ -408,7 +408,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_tum(
@@ -417,7 +417,7 @@ vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_tum(
@@ -426,7 +426,7 @@ vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_tum(
@@ -435,7 +435,7 @@ vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tum(
@@ -444,7 +444,7 @@ vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vsext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tum(
@@ -453,7 +453,7 @@ vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vsext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tum(
@@ -462,7 +462,7 @@ vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
- return vsext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tum(
@@ -471,7 +471,7 @@ vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
- return vsext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tum(
@@ -480,7 +480,7 @@ vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_tum(
@@ -489,7 +489,7 @@ vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_tum(
@@ -498,7 +498,7 @@ vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_tum(
@@ -507,7 +507,7 @@ vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
- return vsext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_tumu(
@@ -516,7 +516,7 @@ vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_tumu(
@@ -525,7 +525,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_tumu(
@@ -534,7 +534,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_tumu(
@@ -543,7 +543,7 @@ vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_tumu(
@@ -552,7 +552,7 @@ vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_tumu(
@@ -561,7 +561,7 @@ vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tumu(
@@ -570,7 +570,7 @@ vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tumu(
@@ -579,7 +579,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tumu(
@@ -588,7 +588,7 @@ vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tumu(
@@ -597,7 +597,7 @@ vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tumu(
@@ -606,7 +606,7 @@ vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
- return vsext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tumu(
@@ -615,7 +615,7 @@ vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf8_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tumu(
@@ -624,7 +624,7 @@ vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf8_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tumu(
@@ -633,7 +633,7 @@ vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf8_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tumu(
@@ -642,7 +642,7 @@ vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf8_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tumu(
@@ -651,7 +651,7 @@ vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_tumu(
@@ -660,7 +660,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_tumu(
@@ -669,7 +669,7 @@ vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_tumu(
@@ -678,7 +678,7 @@ vint32m2_t test_vsext_vf2_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_tumu(
@@ -687,7 +687,7 @@ vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tumu(
@@ -696,7 +696,7 @@ vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vsext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tumu(
@@ -705,7 +705,7 @@ vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vsext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tumu(
@@ -714,7 +714,7 @@ vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
- return vsext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tumu(
@@ -723,7 +723,7 @@ vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
- return vsext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tumu(
@@ -732,7 +732,7 @@ vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_tumu(
@@ -741,7 +741,7 @@ vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_tumu(
@@ -750,7 +750,7 @@ vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_tumu(
@@ -759,7 +759,7 @@ vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
- return vsext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_mu(
@@ -768,7 +768,7 @@ vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_mu(
@@ -777,7 +777,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_mu(
@@ -786,7 +786,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_mu(
@@ -795,7 +795,7 @@ vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_mu(
@@ -804,7 +804,7 @@ vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_mu(
@@ -813,7 +813,7 @@ vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_mu(
@@ -822,7 +822,7 @@ vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_mu(
@@ -831,7 +831,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_mu(
@@ -840,7 +840,7 @@ vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_mu(
@@ -849,7 +849,7 @@ vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_mu(
@@ -858,7 +858,7 @@ vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
- return vsext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mu(
@@ -867,7 +867,7 @@ vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vsext_vf8_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mu(
@@ -876,7 +876,7 @@ vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vsext_vf8_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mu(
@@ -885,7 +885,7 @@ vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vsext_vf8_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mu(
@@ -894,7 +894,7 @@ vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
- return vsext_vf8_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_mu(
@@ -903,7 +903,7 @@ vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_mu(
@@ -912,7 +912,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_mu(
@@ -921,7 +921,7 @@ vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_mu(
@@ -930,7 +930,7 @@ vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_mu(
@@ -939,7 +939,7 @@ vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_mu(
@@ -948,7 +948,7 @@ vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vsext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_mu(
@@ -957,7 +957,7 @@ vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vsext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_mu(
@@ -966,7 +966,7 @@ vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
- return vsext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_mu(
@@ -975,7 +975,7 @@ vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
- return vsext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_mu(
@@ -984,7 +984,7 @@ vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_mu(
@@ -993,7 +993,7 @@ vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_mu(
@@ -1002,7 +1002,7 @@ vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_mu(
@@ -1011,6 +1011,6 @@ vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
- return vsext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
}
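The vsext.c hunks follow the same pattern across all four policy variants exercised above (_tu, _tum, _tumu, _mu): the vf2/vf4/vf8 widening factor and the policy suffix are untouched, only the prefix changes. A sketch under the same assumptions as the previous example (widen2_tum is a hypothetical wrapper; the signature matches the i32m1 _tum tests above):

#include <riscv_vector.h>

// Masked, tail-undisturbed sign extension doubling element width (SEW 16 -> 32).
// Argument order (mask, maskedoff, op1, vl) is unchanged by the rename.
vint32m1_t widen2_tum(vbool32_t mask, vint32m1_t maskedoff,
                      vint16mf2_t op1, size_t vl) {
  // Previously: return vsext_vf2_tum(mask, maskedoff, op1, vl);
  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
}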
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1down.c
index cdd1ed32f173..cf100e6ecba4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1down.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, in
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_tu(
@@ -30,7 +30,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, in
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_tu(
@@ -39,7 +39,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, in
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1down_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_tu(
@@ -57,7 +57,7 @@ vint8m2_t test_vslide1down_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_tu(
@@ -66,7 +66,7 @@ vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_tu(
@@ -75,7 +75,7 @@ vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_tu(
@@ -84,7 +84,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_tu(
@@ -93,7 +93,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, in
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_tu(
@@ -111,7 +111,7 @@ vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, in
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_tu(
@@ -120,7 +120,7 @@ vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, in
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tu(
@@ -129,7 +129,7 @@ vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, in
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_tu(
@@ -138,7 +138,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1down_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vslide1down_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, in
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_tu(
@@ -156,7 +156,7 @@ vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, in
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_tu(
@@ -165,7 +165,7 @@ vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, in
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, in
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, in
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_tu(
@@ -192,7 +192,7 @@ vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, in
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_tu(
@@ -201,7 +201,7 @@ vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, in
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_tu(
@@ -210,7 +210,7 @@ vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, in
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1down_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_tu(
@@ -219,7 +219,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_tu(
@@ -228,7 +228,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_tu(
@@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_tu(
@@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_tu(
@@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_tu(
@@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1down_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_tu(
@@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_tu(
@@ -282,7 +282,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_tu(
@@ -291,7 +291,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_tu(
@@ -300,7 +300,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_tu(
@@ -309,7 +309,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_tu(
@@ -318,7 +318,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tu(
@@ -327,7 +327,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_tu(
@@ -336,7 +336,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_tu(
@@ -345,7 +345,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_tu(
@@ -354,7 +354,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_tu(
@@ -363,7 +363,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_tu(
@@ -372,7 +372,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_tu(
@@ -381,7 +381,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_tu(
@@ -390,7 +390,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1down_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1down_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_tum(
@@ -426,7 +426,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_tum(
@@ -435,7 +435,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_tum(
@@ -444,7 +444,7 @@ vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_tum(
@@ -453,7 +453,7 @@ vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_tum(
@@ -462,7 +462,7 @@ vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_tum(
@@ -471,7 +471,7 @@ vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_tum(
@@ -480,7 +480,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_tum(
@@ -489,7 +489,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_tum(
@@ -498,7 +498,7 @@ vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_tum(
@@ -507,7 +507,7 @@ vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1down_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_tum(
@@ -516,7 +516,7 @@ vint16m4_t test_vslide1down_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tum(
@@ -525,7 +525,7 @@ vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_tum(
@@ -534,7 +534,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_tum(
@@ -543,7 +543,7 @@ vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_tum(
@@ -552,7 +552,7 @@ vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_tum(
@@ -561,7 +561,7 @@ vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_tum(
@@ -570,7 +570,7 @@ vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_tum(
@@ -579,7 +579,7 @@ vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_tum(
@@ -588,7 +588,7 @@ vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_tum(
@@ -597,7 +597,7 @@ vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_tum(
@@ -606,7 +606,7 @@ vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_tum(
@@ -615,7 +615,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_tum(
@@ -624,7 +624,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1down_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_tum(
@@ -633,7 +633,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_tum(
@@ -642,7 +642,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_tum(
@@ -651,7 +651,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_tum(
@@ -660,7 +660,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_tum(
@@ -669,7 +669,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_tum(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_tum(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_tum(
@@ -696,7 +696,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_tum(
@@ -705,7 +705,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_tum(
@@ -714,7 +714,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tum(
@@ -723,7 +723,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_tum(
@@ -732,7 +732,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_tum(
@@ -741,7 +741,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1down_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_tum(
@@ -750,7 +750,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_tum(
@@ -759,7 +759,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_tum(
@@ -768,7 +768,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1down_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_tum(
@@ -777,7 +777,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_tum(
@@ -786,7 +786,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1down_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_tumu(
@@ -822,7 +822,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_tumu(
@@ -831,7 +831,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_tumu(
@@ -840,7 +840,7 @@ vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_tumu(
@@ -849,7 +849,7 @@ vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1down_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_tumu(
@@ -858,7 +858,7 @@ vint8m4_t test_vslide1down_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_tumu(
@@ -867,7 +867,7 @@ vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_tumu(
@@ -876,7 +876,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1down_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_tumu(
@@ -885,7 +885,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_tumu(
@@ -894,7 +894,7 @@ vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_tumu(
@@ -903,7 +903,7 @@ vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_tumu(
@@ -912,7 +912,7 @@ vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tumu(
@@ -921,7 +921,7 @@ vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_tumu(
@@ -930,7 +930,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_tumu(
@@ -939,7 +939,7 @@ vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_tumu(
@@ -948,7 +948,7 @@ vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_tumu(
@@ -957,7 +957,7 @@ vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_tumu(
@@ -966,7 +966,7 @@ vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_tumu(
@@ -975,7 +975,7 @@ vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_tumu(
@@ -984,7 +984,7 @@ vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_tumu(
@@ -993,7 +993,7 @@ vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_tumu(
@@ -1002,7 +1002,7 @@ vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_tumu(
@@ -1011,7 +1011,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_tumu(
@@ -1020,7 +1020,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_tumu(
@@ -1029,7 +1029,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_tumu(
@@ -1038,7 +1038,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_tumu(
@@ -1047,7 +1047,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_tumu(
@@ -1056,7 +1056,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_tumu(
@@ -1065,7 +1065,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1down_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_tumu(
@@ -1074,7 +1074,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_tumu(
@@ -1083,7 +1083,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1down_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_tumu(
@@ -1092,7 +1092,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1down_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_tumu(
@@ -1101,7 +1101,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_tumu(
@@ -1110,7 +1110,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tumu(
@@ -1119,7 +1119,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1down_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_tumu(
@@ -1137,7 +1137,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_tumu(
@@ -1146,7 +1146,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1down_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_tumu(
@@ -1155,7 +1155,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_tumu(
@@ -1164,7 +1164,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_tumu(
@@ -1173,7 +1173,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_tumu(
@@ -1182,7 +1182,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1down_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_mu(
@@ -1218,7 +1218,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_mu(
@@ -1227,7 +1227,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_mu(
@@ -1236,7 +1236,7 @@ vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_mu(
@@ -1245,7 +1245,7 @@ vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_mu(
@@ -1254,7 +1254,7 @@ vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_mu(
@@ -1263,7 +1263,7 @@ vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_mu(
@@ -1272,7 +1272,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_mu(
@@ -1281,7 +1281,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_mu(
@@ -1290,7 +1290,7 @@ vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1down_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_mu(
@@ -1299,7 +1299,7 @@ vint16m2_t test_vslide1down_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_mu(
@@ -1308,7 +1308,7 @@ vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_mu(
@@ -1317,7 +1317,7 @@ vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_mu(
@@ -1326,7 +1326,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_mu(
@@ -1335,7 +1335,7 @@ vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_mu(
@@ -1344,7 +1344,7 @@ vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_mu(
@@ -1353,7 +1353,7 @@ vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_mu(
@@ -1362,7 +1362,7 @@ vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_mu(
@@ -1371,7 +1371,7 @@ vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_mu(
@@ -1380,7 +1380,7 @@ vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_mu(
@@ -1389,7 +1389,7 @@ vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_mu(
@@ -1398,7 +1398,7 @@ vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_mu(
@@ -1407,7 +1407,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_mu(
@@ -1416,7 +1416,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1down_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_mu(
@@ -1425,7 +1425,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_mu(
@@ -1434,7 +1434,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1down_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_mu(
@@ -1443,7 +1443,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_mu(
@@ -1452,7 +1452,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_mu(
@@ -1461,7 +1461,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_mu(
@@ -1470,7 +1470,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_mu(
@@ -1479,7 +1479,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1down_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_mu(
@@ -1488,7 +1488,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_mu(
@@ -1497,7 +1497,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_mu(
@@ -1506,7 +1506,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_mu(
@@ -1515,7 +1515,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_mu(
@@ -1524,7 +1524,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_mu(
@@ -1533,7 +1533,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_mu(
@@ -1542,7 +1542,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_mu(
@@ -1551,7 +1551,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1down_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_mu(
@@ -1560,7 +1560,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_mu(
@@ -1569,7 +1569,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_mu(
@@ -1578,7 +1578,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1down_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1down_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
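
As a reading aid for the rename above: under the new scheme every overloaded policy intrinsic keeps its argument list unchanged and only gains the `__riscv_` prefix. A minimal caller-side sketch, assuming an RVV 1.0 toolchain providing `<riscv_vector.h>` (the wrapper function is illustrative and not part of this patch; the intrinsic call itself is copied verbatim from the tests above):

#include <riscv_vector.h>

// Illustrative wrapper (not from the patch): mask-undisturbed
// vslide1down under the new __riscv_-prefixed overloaded naming.
// Build with an RVV-enabled compiler, e.g. clang -march=rv64gcv.
vint32m1_t slide1down_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff,
                               vint32m1_t src, int32_t value, size_t vl) {
  // Inactive elements keep maskedoff; active ones get src slid down
  // by one with `value` inserted at the top.
  return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
}
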
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1up.c
index 0c279df82750..045e403387b4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslide1up.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_tu(
@@ -30,7 +30,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_tu(
@@ -39,7 +39,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_tu(
@@ -48,7 +48,7 @@ vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_tu(
@@ -57,7 +57,7 @@ vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1up_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_tu(
@@ -66,7 +66,7 @@ vint8m4_t test_vslide1up_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t v
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_tu(
@@ -75,7 +75,7 @@ vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_tu(
@@ -84,7 +84,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_tu(
@@ -93,7 +93,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1up_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vslide1up_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_tu(
@@ -111,7 +111,7 @@ vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_tu(
@@ -120,7 +120,7 @@ vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tu(
@@ -129,7 +129,7 @@ vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_tu(
@@ -138,7 +138,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_tu(
@@ -156,7 +156,7 @@ vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_tu(
@@ -165,7 +165,7 @@ vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1up_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_tu(
@@ -174,7 +174,7 @@ vint32m8_t test_vslide1up_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_tu(
@@ -183,7 +183,7 @@ vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_tu(
@@ -192,7 +192,7 @@ vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_tu(
@@ -201,7 +201,7 @@ vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_tu(
@@ -210,7 +210,7 @@ vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_tu(
@@ -219,7 +219,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_tu(
@@ -228,7 +228,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_tu(
@@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, u
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_tu(
@@ -246,7 +246,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_tu(
@@ -255,7 +255,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_tu(
@@ -264,7 +264,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1up_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_tu(
@@ -273,7 +273,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_tu(
@@ -282,7 +282,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_tu(
@@ -291,7 +291,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_tu(
@@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, u
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_tu(
@@ -309,7 +309,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, u
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_tu(
@@ -318,7 +318,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, u
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tu(
@@ -327,7 +327,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, u
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_tu(
@@ -336,7 +336,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_tu(
@@ -345,7 +345,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_tu(
@@ -354,7 +354,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, u
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_tu(
@@ -363,7 +363,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, u
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_tu(
@@ -372,7 +372,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, u
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_tu(
@@ -381,7 +381,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_tu(
@@ -390,7 +390,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, u
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1up_tu(maskedoff, src, value, vl);
+ return __riscv_vslide1up_tu(maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, u
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_tum(
@@ -426,7 +426,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_tum(
@@ -435,7 +435,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_tum(
@@ -444,7 +444,7 @@ vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_tum(
@@ -453,7 +453,7 @@ vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1up_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_tum(
@@ -462,7 +462,7 @@ vint8m4_t test_vslide1up_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_tum(
@@ -471,7 +471,7 @@ vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_tum(
@@ -480,7 +480,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_tum(
@@ -489,7 +489,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_tum(
@@ -498,7 +498,7 @@ vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_tum(
@@ -507,7 +507,7 @@ vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_tum(
@@ -516,7 +516,7 @@ vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tum(
@@ -525,7 +525,7 @@ vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_tum(
@@ -534,7 +534,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_tum(
@@ -543,7 +543,7 @@ vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1up_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_tum(
@@ -552,7 +552,7 @@ vint32m2_t test_vslide1up_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_tum(
@@ -561,7 +561,7 @@ vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_tum(
@@ -570,7 +570,7 @@ vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_tum(
@@ -579,7 +579,7 @@ vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_tum(
@@ -588,7 +588,7 @@ vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_tum(
@@ -597,7 +597,7 @@ vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_tum(
@@ -606,7 +606,7 @@ vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_tum(
@@ -615,7 +615,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_tum(
@@ -624,7 +624,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1up_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_tum(
@@ -633,7 +633,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1up_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_tum(
@@ -642,7 +642,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_tum(
@@ -651,7 +651,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_tum(
@@ -660,7 +660,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_tum(
@@ -669,7 +669,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_tum(
@@ -678,7 +678,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_tum(
@@ -687,7 +687,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_tum(
@@ -696,7 +696,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_tum(
@@ -705,7 +705,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_tum(
@@ -714,7 +714,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tum(
@@ -723,7 +723,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_tum(
@@ -732,7 +732,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_tum(
@@ -741,7 +741,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_tum(
@@ -750,7 +750,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_tum(
@@ -759,7 +759,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_tum(
@@ -768,7 +768,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_tum(
@@ -777,7 +777,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_tum(
@@ -786,7 +786,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1up_tum(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_tumu(
@@ -822,7 +822,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_tumu(
@@ -831,7 +831,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_tumu(
@@ -840,7 +840,7 @@ vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_tumu(
@@ -849,7 +849,7 @@ vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_tumu(
@@ -858,7 +858,7 @@ vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_tumu(
@@ -867,7 +867,7 @@ vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_tumu(
@@ -876,7 +876,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_tumu(
@@ -885,7 +885,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_tumu(
@@ -894,7 +894,7 @@ vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_tumu(
@@ -903,7 +903,7 @@ vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_tumu(
@@ -912,7 +912,7 @@ vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tumu(
@@ -921,7 +921,7 @@ vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_tumu(
@@ -930,7 +930,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_tumu(
@@ -939,7 +939,7 @@ vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_tumu(
@@ -948,7 +948,7 @@ vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_tumu(
@@ -957,7 +957,7 @@ vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_tumu(
@@ -966,7 +966,7 @@ vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_tumu(
@@ -975,7 +975,7 @@ vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_tumu(
@@ -984,7 +984,7 @@ vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_tumu(
@@ -993,7 +993,7 @@ vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_tumu(
@@ -1002,7 +1002,7 @@ vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_tumu(
@@ -1011,7 +1011,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_tumu(
@@ -1020,7 +1020,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_tumu(
@@ -1029,7 +1029,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_tumu(
@@ -1038,7 +1038,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_tumu(
@@ -1047,7 +1047,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_tumu(
@@ -1056,7 +1056,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_tumu(
@@ -1065,7 +1065,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1up_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_tumu(
@@ -1074,7 +1074,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1up_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_tumu(
@@ -1083,7 +1083,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_tumu(
@@ -1092,7 +1092,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_tumu(
@@ -1101,7 +1101,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_tumu(
@@ -1110,7 +1110,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tumu(
@@ -1119,7 +1119,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_tumu(
@@ -1137,7 +1137,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_tumu(
@@ -1146,7 +1146,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_tumu(
@@ -1155,7 +1155,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_tumu(
@@ -1164,7 +1164,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_tumu(
@@ -1173,7 +1173,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_tumu(
@@ -1182,7 +1182,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1up_tumu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_mu(
@@ -1218,7 +1218,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_mu(
@@ -1227,7 +1227,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_mu(
@@ -1236,7 +1236,7 @@ vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_mu(
@@ -1245,7 +1245,7 @@ vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_mu(
@@ -1254,7 +1254,7 @@ vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslide1up_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_mu(
@@ -1263,7 +1263,7 @@ vint8m8_t test_vslide1up_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_mu(
@@ -1272,7 +1272,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_mu(
@@ -1281,7 +1281,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_mu(
@@ -1290,7 +1290,7 @@ vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_mu(
@@ -1299,7 +1299,7 @@ vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_mu(
@@ -1308,7 +1308,7 @@ vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_mu(
@@ -1317,7 +1317,7 @@ vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_mu(
@@ -1326,7 +1326,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_mu(
@@ -1335,7 +1335,7 @@ vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_mu(
@@ -1344,7 +1344,7 @@ vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_mu(
@@ -1353,7 +1353,7 @@ vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_mu(
@@ -1362,7 +1362,7 @@ vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_mu(
@@ -1371,7 +1371,7 @@ vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_mu(
@@ -1380,7 +1380,7 @@ vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslide1up_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_mu(
@@ -1389,7 +1389,7 @@ vint64m4_t test_vslide1up_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_mu(
@@ -1398,7 +1398,7 @@ vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_mu(
@@ -1407,7 +1407,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_mu(
@@ -1416,7 +1416,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_mu(
@@ -1425,7 +1425,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_mu(
@@ -1434,7 +1434,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslide1up_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_mu(
@@ -1443,7 +1443,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_mu(
@@ -1452,7 +1452,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_mu(
@@ -1461,7 +1461,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_mu(
@@ -1470,7 +1470,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_mu(
@@ -1479,7 +1479,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_mu(
@@ -1488,7 +1488,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_mu(
@@ -1497,7 +1497,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_mu(
@@ -1506,7 +1506,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_mu(
@@ -1515,7 +1515,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslide1up_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_mu(
@@ -1524,7 +1524,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_mu(
@@ -1533,7 +1533,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_mu(
@@ -1542,7 +1542,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_mu(
@@ -1551,7 +1551,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_mu(
@@ -1560,7 +1560,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_mu(
@@ -1569,7 +1569,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_mu(
@@ -1578,7 +1578,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslide1up_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1up_mu(mask, maskedoff, src, value, vl);
+ return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c
index 4675b667b235..b18f9f988e4b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t sr
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t sr
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t sr
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t sr
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t sr
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t sr
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t sr
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t sr
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tu(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t sr
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tu(
@@ -157,7 +157,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tu(
@@ -166,7 +166,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tu(
@@ -175,7 +175,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tu(
@@ -184,7 +184,7 @@ vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tu(
@@ -193,7 +193,7 @@ vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tu(
@@ -202,7 +202,7 @@ vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tu(
@@ -211,7 +211,7 @@ vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tu(
@@ -220,7 +220,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tu(
@@ -229,7 +229,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tu(
@@ -238,7 +238,7 @@ vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tu(
@@ -247,7 +247,7 @@ vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tu(
@@ -256,7 +256,7 @@ vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tu(
@@ -265,7 +265,7 @@ vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tu(
@@ -274,7 +274,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tu(
@@ -301,7 +301,7 @@ vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tu(
@@ -310,7 +310,7 @@ vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tu(
@@ -319,7 +319,7 @@ vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tu(
@@ -328,7 +328,7 @@ vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tu(
@@ -337,7 +337,7 @@ vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tu(
@@ -346,7 +346,7 @@ vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tu(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tu(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tu(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tu(
@@ -382,7 +382,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tu(
@@ -391,7 +391,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tu(
@@ -400,7 +400,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tu(
@@ -409,7 +409,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tu(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tu(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tu(
@@ -436,7 +436,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tu(
@@ -445,7 +445,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tu(
@@ -454,7 +454,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu(
@@ -463,7 +463,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tu(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tu(
@@ -481,7 +481,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tu(
@@ -490,7 +490,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tu(
@@ -499,7 +499,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tu(
@@ -508,7 +508,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tu(
@@ -517,7 +517,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tu(
@@ -526,7 +526,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tu(
@@ -535,7 +535,7 @@ vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
- return vslidedown_tu(maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tu(maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tum(
@@ -544,7 +544,7 @@ vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tum(
@@ -553,7 +553,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tum(
@@ -562,7 +562,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tum(
@@ -571,7 +571,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tum(
@@ -580,7 +580,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tum(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tum(
@@ -598,7 +598,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tum(
@@ -607,7 +607,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tum(
@@ -616,7 +616,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tum(
@@ -625,7 +625,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslidedown_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tum(
@@ -634,7 +634,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tum(
@@ -643,7 +643,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tum(
@@ -652,7 +652,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tum(
@@ -661,7 +661,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tum(
@@ -670,7 +670,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tum(
@@ -679,7 +679,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tum(
@@ -688,7 +688,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tum(
@@ -697,7 +697,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tum(
@@ -706,7 +706,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tum(
@@ -715,7 +715,7 @@ vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tum(
@@ -724,7 +724,7 @@ vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tum(
@@ -733,7 +733,7 @@ vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tum(
@@ -742,7 +742,7 @@ vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tum(
@@ -751,7 +751,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tum(
@@ -760,7 +760,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tum(
@@ -769,7 +769,7 @@ vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tum(
@@ -778,7 +778,7 @@ vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tum(
@@ -787,7 +787,7 @@ vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tum(
@@ -796,7 +796,7 @@ vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tum(
@@ -805,7 +805,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tum(
@@ -814,7 +814,7 @@ vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tum(
@@ -823,7 +823,7 @@ vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tum(
@@ -832,7 +832,7 @@ vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tum(
@@ -841,7 +841,7 @@ vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslidedown_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tum(
@@ -850,7 +850,7 @@ vint64m1_t test_vslidedown_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tum(
@@ -859,7 +859,7 @@ vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tum(
@@ -868,7 +868,7 @@ vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tum(
@@ -877,7 +877,7 @@ vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslidedown_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tum(
@@ -886,7 +886,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslidedown_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tum(
@@ -895,7 +895,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tum(
@@ -904,7 +904,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tum(
@@ -913,7 +913,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tum(
@@ -922,7 +922,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tum(
@@ -931,7 +931,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tum(
@@ -940,7 +940,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tum(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tum(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tum(
@@ -967,7 +967,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tum(
@@ -976,7 +976,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tum(
@@ -985,7 +985,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tum(
@@ -994,7 +994,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tum(
@@ -1003,7 +1003,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslidedown_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tum(
@@ -1012,7 +1012,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tum(
@@ -1021,7 +1021,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tum(
@@ -1030,7 +1030,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tum(
@@ -1039,7 +1039,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tum(
@@ -1048,7 +1048,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslidedown_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tum(
@@ -1057,7 +1057,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tum(
@@ -1066,7 +1066,7 @@ vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
- return vslidedown_tum(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tumu(
@@ -1075,7 +1075,7 @@ vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tumu(
@@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tumu(
@@ -1093,7 +1093,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tumu(
@@ -1102,7 +1102,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tumu(
@@ -1111,7 +1111,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tumu(
@@ -1120,7 +1120,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tumu(
@@ -1129,7 +1129,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tumu(
@@ -1138,7 +1138,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslidedown_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tumu(
@@ -1147,7 +1147,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tumu(
@@ -1156,7 +1156,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tumu(
@@ -1165,7 +1165,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tumu(
@@ -1174,7 +1174,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tumu(
@@ -1183,7 +1183,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tumu(
@@ -1192,7 +1192,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tumu(
@@ -1201,7 +1201,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tumu(
@@ -1210,7 +1210,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tumu(
@@ -1219,7 +1219,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslidedown_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tumu(
@@ -1228,7 +1228,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tumu(
@@ -1237,7 +1237,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tumu(
@@ -1246,7 +1246,7 @@ vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tumu(
@@ -1255,7 +1255,7 @@ vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tumu(
@@ -1264,7 +1264,7 @@ vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tumu(
@@ -1273,7 +1273,7 @@ vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslidedown_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tumu(
@@ -1282,7 +1282,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tumu(
@@ -1291,7 +1291,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tumu(
@@ -1300,7 +1300,7 @@ vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tumu(
@@ -1309,7 +1309,7 @@ vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tumu(
@@ -1318,7 +1318,7 @@ vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tumu(
@@ -1327,7 +1327,7 @@ vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tumu(
@@ -1336,7 +1336,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tumu(
@@ -1345,7 +1345,7 @@ vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tumu(
@@ -1354,7 +1354,7 @@ vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tumu(
@@ -1363,7 +1363,7 @@ vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tumu(
@@ -1372,7 +1372,7 @@ vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tumu(
@@ -1381,7 +1381,7 @@ vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslidedown_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tumu(
@@ -1390,7 +1390,7 @@ vint64m2_t test_vslidedown_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tumu(
@@ -1399,7 +1399,7 @@ vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tumu(
@@ -1408,7 +1408,7 @@ vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tumu(
@@ -1417,7 +1417,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tumu(
@@ -1426,7 +1426,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tumu(
@@ -1435,7 +1435,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslidedown_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tumu(
@@ -1444,7 +1444,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tumu(
@@ -1453,7 +1453,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tumu(
@@ -1462,7 +1462,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tumu(
@@ -1471,7 +1471,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tumu(
@@ -1480,7 +1480,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tumu(
@@ -1489,7 +1489,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslidedown_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tumu(
@@ -1498,7 +1498,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslidedown_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tumu(
@@ -1507,7 +1507,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tumu(
@@ -1516,7 +1516,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tumu(
@@ -1525,7 +1525,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tumu(
@@ -1534,7 +1534,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tumu(
@@ -1543,7 +1543,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslidedown_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tumu(
@@ -1552,7 +1552,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tumu(
@@ -1561,7 +1561,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tumu(
@@ -1570,7 +1570,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslidedown_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tumu(
@@ -1579,7 +1579,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tumu(
@@ -1588,7 +1588,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tumu(
@@ -1597,7 +1597,7 @@ vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
- return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_mu(
@@ -1606,7 +1606,7 @@ vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_mu(
@@ -1615,7 +1615,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_mu(
@@ -1624,7 +1624,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_mu(
@@ -1633,7 +1633,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_mu(
@@ -1642,7 +1642,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_mu(
@@ -1651,7 +1651,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_mu(
@@ -1660,7 +1660,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_mu(
@@ -1669,7 +1669,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_mu(
@@ -1678,7 +1678,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_mu(
@@ -1687,7 +1687,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_mu(
@@ -1696,7 +1696,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_mu(
@@ -1705,7 +1705,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslidedown_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_mu(
@@ -1714,7 +1714,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_mu(
@@ -1723,7 +1723,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_mu(
@@ -1732,7 +1732,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_mu(
@@ -1741,7 +1741,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_mu(
@@ -1750,7 +1750,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_mu(
@@ -1759,7 +1759,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_mu(
@@ -1768,7 +1768,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslidedown_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_mu(
@@ -1777,7 +1777,7 @@ vint8m1_t test_vslidedown_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_mu(
@@ -1786,7 +1786,7 @@ vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_mu(
@@ -1795,7 +1795,7 @@ vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_mu(
@@ -1804,7 +1804,7 @@ vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_mu(
@@ -1813,7 +1813,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslidedown_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_mu(
@@ -1822,7 +1822,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_mu(
@@ -1831,7 +1831,7 @@ vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_mu(
@@ -1840,7 +1840,7 @@ vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslidedown_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_mu(
@@ -1849,7 +1849,7 @@ vint16m4_t test_vslidedown_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslidedown_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_mu(
@@ -1858,7 +1858,7 @@ vint16m8_t test_vslidedown_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_mu(
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_mu(
@@ -1876,7 +1876,7 @@ vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_mu(
@@ -1885,7 +1885,7 @@ vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_mu(
@@ -1894,7 +1894,7 @@ vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_mu(
@@ -1903,7 +1903,7 @@ vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_mu(
@@ -1912,7 +1912,7 @@ vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_mu(
@@ -1921,7 +1921,7 @@ vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_mu(
@@ -1930,7 +1930,7 @@ vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslidedown_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_mu(
@@ -1939,7 +1939,7 @@ vint64m8_t test_vslidedown_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_mu(
@@ -1948,7 +1948,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_mu(
@@ -1957,7 +1957,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_mu(
@@ -1966,7 +1966,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_mu(
@@ -1975,7 +1975,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_mu(
@@ -1984,7 +1984,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_mu(
@@ -1993,7 +1993,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_mu(
@@ -2002,7 +2002,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_mu(
@@ -2011,7 +2011,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_mu(
@@ -2020,7 +2020,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_mu(
@@ -2029,7 +2029,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_mu(
@@ -2038,7 +2038,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslidedown_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_mu(
@@ -2047,7 +2047,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_mu(
@@ -2056,7 +2056,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_mu(
@@ -2065,7 +2065,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_mu(
@@ -2074,7 +2074,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_mu(
@@ -2083,7 +2083,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_mu(
@@ -2092,7 +2092,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_mu(
@@ -2101,7 +2101,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_mu(
@@ -2110,7 +2110,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslidedown_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_mu(
@@ -2119,7 +2119,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_mu(
@@ -2128,6 +2128,6 @@ vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslidedown_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
- return vslidedown_mu(mask, maskedoff, src, offset, vl);
+ return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
}
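For orientation between the two renamed files, here is a minimal caller-side sketch (not part of the patch) of what the rename means for user code: overloaded policy intrinsics are now spelled with the __riscv_ prefix. The wrapper name slide_down_active is illustrative only; the types and the intrinsic signature are copied verbatim from the tests above, and riscv_vector.h is the standard Clang header exposing them.

#include <riscv_vector.h>
#include <stddef.h>

// Slide `src` down by `offset` elements under `mask`. The _tumu suffix
// selects the tail-undisturbed, mask-undisturbed policy, so tail elements
// and masked-off elements keep their values from `maskedoff`.
vint32m1_t slide_down_active(vbool32_t mask, vint32m1_t maskedoff,
                             vint32m1_t src, size_t offset, size_t vl) {
  return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl);
}

Overload resolution is unchanged by this patch; only the prefix is added, so the same call works for every element type and LMUL exercised in the tests above.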
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslideup.c
index d592ca92a981..ba3b618cfc82 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslideup.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_tu(
@@ -22,7 +22,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t dest, vfloat16mf4_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_tu(
@@ -31,7 +31,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t dest, vfloat16mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_tu(
@@ -40,7 +40,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t dest, vfloat16m1_t src, size
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_tu(
@@ -49,7 +49,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t dest, vfloat16m2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_tu(
@@ -58,7 +58,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t dest, vfloat16m4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tu(
@@ -67,7 +67,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t dest, vfloat16m8_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_tu(
@@ -76,7 +76,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t dest, vfloat32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_tu(
@@ -85,7 +85,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t dest, vfloat32m1_t src, size
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_tu(
@@ -94,7 +94,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t dest, vfloat32m2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_tu(
@@ -103,7 +103,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t dest, vfloat32m4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_tu(
@@ -112,7 +112,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t dest, vfloat32m8_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_tu(
@@ -121,7 +121,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t dest, vfloat64m1_t src, size
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_tu(
@@ -130,7 +130,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t dest, vfloat64m2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_tu(
@@ -139,7 +139,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t dest, vfloat64m4_t src, size
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_tu(
@@ -148,7 +148,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t dest, vfloat64m8_t src, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_tu(
@@ -157,7 +157,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t dest, vint8mf8_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_tu(
@@ -166,7 +166,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t dest, vint8mf4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_tu(
@@ -175,7 +175,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t dest, vint8mf2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_tu(
@@ -184,7 +184,7 @@ vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t dest, vint8m1_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_tu(
@@ -193,7 +193,7 @@ vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t dest, vint8m2_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_tu(
@@ -202,7 +202,7 @@ vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t dest, vint8m4_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_tu(
@@ -211,7 +211,7 @@ vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t dest, vint8m8_t src, size_t offset,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_tu(
@@ -220,7 +220,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t dest, vint16mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_tu(
@@ -229,7 +229,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t dest, vint16mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_tu(
@@ -238,7 +238,7 @@ vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t dest, vint16m1_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_tu(
@@ -247,7 +247,7 @@ vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t dest, vint16m2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_tu(
@@ -256,7 +256,7 @@ vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t dest, vint16m4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tu(
@@ -265,7 +265,7 @@ vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t dest, vint16m8_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_tu(
@@ -274,7 +274,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t dest, vint32mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_tu(
@@ -283,7 +283,7 @@ vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t dest, vint32m1_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_tu(
@@ -292,7 +292,7 @@ vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t dest, vint32m2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_tu(
@@ -301,7 +301,7 @@ vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t dest, vint32m4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_tu(
@@ -310,7 +310,7 @@ vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t dest, vint32m8_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_tu(
@@ -319,7 +319,7 @@ vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t dest, vint64m1_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_tu(
@@ -328,7 +328,7 @@ vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t dest, vint64m2_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_tu(
@@ -337,7 +337,7 @@ vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t dest, vint64m4_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_tu(
@@ -346,7 +346,7 @@ vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t dest, vint64m8_t src, size_t off
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_tu(
@@ -355,7 +355,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t dest, vuint8mf8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_tu(
@@ -364,7 +364,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t dest, vuint8mf4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_tu(
@@ -373,7 +373,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t dest, vuint8mf2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_tu(
@@ -382,7 +382,7 @@ vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t dest, vuint8m1_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_tu(
@@ -391,7 +391,7 @@ vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t dest, vuint8m2_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_tu(
@@ -400,7 +400,7 @@ vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t dest, vuint8m4_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_tu(
@@ -409,7 +409,7 @@ vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t dest, vuint8m8_t src, size_t offs
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_tu(
@@ -418,7 +418,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t dest, vuint16mf4_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_tu(
@@ -427,7 +427,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t dest, vuint16mf2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_tu(
@@ -436,7 +436,7 @@ vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t dest, vuint16m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_tu(
@@ -445,7 +445,7 @@ vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t dest, vuint16m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_tu(
@@ -454,7 +454,7 @@ vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t dest, vuint16m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tu(
@@ -463,7 +463,7 @@ vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t dest, vuint16m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_tu(
@@ -472,7 +472,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t dest, vuint32mf2_t src, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_tu(
@@ -481,7 +481,7 @@ vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t dest, vuint32m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_tu(
@@ -490,7 +490,7 @@ vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t dest, vuint32m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_tu(
@@ -499,7 +499,7 @@ vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t dest, vuint32m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_tu(
@@ -508,7 +508,7 @@ vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t dest, vuint32m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_tu(
@@ -517,7 +517,7 @@ vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t dest, vuint64m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_tu(
@@ -526,7 +526,7 @@ vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t dest, vuint64m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_tu(
@@ -535,7 +535,7 @@ vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t dest, vuint64m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
- return vslideup_tu(dest, src, offset, vl);
+ return __riscv_vslideup_tu(dest, src, offset, vl);
}
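The hunks above finish the unmasked tail-undisturbed (`_tu`) overloads; the hunks below rename the masked `_tum` variants, which take a leading mask operand. As a minimal standalone sketch of the renamed API (illustrative only, not part of this patch; it assumes a toolchain whose <riscv_vector.h> ships the `__riscv_`-prefixed overloads and that the V extension is enabled):

#include <riscv_vector.h>

// Sketch: the prefixed _tum overload. Tail elements (index >= vl) keep
// their values from `dest`; masked-off elements follow the agnostic policy.
vint32m1_t slideup_tum_example(vbool32_t mask, vint32m1_t dest,
                               vint32m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}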
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_tum(
@@ -544,7 +544,7 @@ vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t dest, vuint64m8_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_tum(
@@ -553,7 +553,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t dest, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_tum(
@@ -562,7 +562,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t dest, vf
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslideup_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_tum(
@@ -571,7 +571,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_tum(
@@ -580,7 +580,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_tum(
@@ -589,7 +589,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tum(
@@ -598,7 +598,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_tum(
@@ -607,7 +607,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t dest, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_tum(
@@ -616,7 +616,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_tum(
@@ -625,7 +625,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_tum(
@@ -634,7 +634,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_tum(
@@ -643,7 +643,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_tum(
@@ -652,7 +652,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_tum(
@@ -661,7 +661,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_tum(
@@ -670,7 +670,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_tum(
@@ -679,7 +679,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_tum(
@@ -688,7 +688,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t dest, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_tum(
@@ -697,7 +697,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t dest, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_tum(
@@ -706,7 +706,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t dest, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_tum(
@@ -715,7 +715,7 @@ vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t mask, vint8m1_t dest, vint8m1_t src
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_tum(
@@ -724,7 +724,7 @@ vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t mask, vint8m2_t dest, vint8m2_t src
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_tum(
@@ -733,7 +733,7 @@ vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t mask, vint8m4_t dest, vint8m4_t src
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_tum(
@@ -742,7 +742,7 @@ vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t mask, vint8m8_t dest, vint8m8_t src
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_tum(
@@ -751,7 +751,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t dest, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_tum(
@@ -760,7 +760,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t dest, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_tum(
@@ -769,7 +769,7 @@ vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t mask, vint16m1_t dest, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_tum(
@@ -778,7 +778,7 @@ vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t mask, vint16m2_t dest, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4_tum(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_tum(
@@ -787,7 +787,7 @@ vint16m4_t test_vslideup_vx_i16m4_tum(vbool4_t mask, vint16m4_t dest, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tum(
@@ -796,7 +796,7 @@ vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t mask, vint16m8_t dest, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_tum(
@@ -805,7 +805,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t dest, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_tum(
@@ -814,7 +814,7 @@ vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t mask, vint32m1_t dest, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_tum(
@@ -823,7 +823,7 @@ vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t mask, vint32m2_t dest, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_tum(
@@ -832,7 +832,7 @@ vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t mask, vint32m4_t dest, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_tum(
@@ -841,7 +841,7 @@ vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t mask, vint32m8_t dest, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1_tum(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_tum(
@@ -850,7 +850,7 @@ vint64m1_t test_vslideup_vx_i64m1_tum(vbool64_t mask, vint64m1_t dest, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_tum(
@@ -859,7 +859,7 @@ vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t mask, vint64m2_t dest, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_tum(
@@ -868,7 +868,7 @@ vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t mask, vint64m4_t dest, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_tum(
@@ -877,7 +877,7 @@ vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t mask, vint64m8_t dest, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_tum(
@@ -886,7 +886,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t dest, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_tum(
@@ -895,7 +895,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t dest, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_tum(
@@ -904,7 +904,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t dest, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_tum(
@@ -913,7 +913,7 @@ vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t mask, vuint8m1_t dest, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_tum(
@@ -922,7 +922,7 @@ vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t mask, vuint8m2_t dest, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_tum(
@@ -931,7 +931,7 @@ vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t mask, vuint8m4_t dest, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_tum(
@@ -940,7 +940,7 @@ vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t mask, vuint8m8_t dest, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_tum(
@@ -949,7 +949,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t dest, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_tum(
@@ -958,7 +958,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t dest, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1_tum(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_tum(
@@ -967,7 +967,7 @@ vuint16m1_t test_vslideup_vx_u16m1_tum(vbool16_t mask, vuint16m1_t dest, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_tum(
@@ -976,7 +976,7 @@ vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t mask, vuint16m2_t dest, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_tum(
@@ -985,7 +985,7 @@ vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t mask, vuint16m4_t dest, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tum(
@@ -994,7 +994,7 @@ vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t mask, vuint16m8_t dest, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_tum(
@@ -1003,7 +1003,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t dest, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_tum(
@@ -1012,7 +1012,7 @@ vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t mask, vuint32m1_t dest, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_tum(
@@ -1021,7 +1021,7 @@ vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t mask, vuint32m2_t dest, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_tum(
@@ -1030,7 +1030,7 @@ vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t mask, vuint32m4_t dest, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_tum(
@@ -1039,7 +1039,7 @@ vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t mask, vuint32m8_t dest, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_tum(
@@ -1048,7 +1048,7 @@ vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t mask, vuint64m1_t dest, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_tum(
@@ -1057,7 +1057,7 @@ vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t mask, vuint64m2_t dest, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_tum(
@@ -1066,7 +1066,7 @@ vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t mask, vuint64m4_t dest, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
- return vslideup_tum(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tum(mask, dest, src, offset, vl);
}
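Next come the `_tumu` overloads. Under the RVV policy-suffix scheme, `_tumu` keeps both the tail and the masked-off elements from `dest`, where `_tum` leaves masked-off elements agnostic. Another hedged sketch under the same assumptions as above:

#include <riscv_vector.h>

// Sketch: the prefixed _tumu overload. Elements past vl and elements whose
// mask bit is clear both retain the corresponding values of `dest`.
vuint32m1_t slideup_tumu_example(vbool32_t mask, vuint32m1_t dest,
                                 vuint32m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}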
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_tumu(
@@ -1075,7 +1075,7 @@ vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t mask, vuint64m8_t dest, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_tumu(
@@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t dest, v
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_tumu(
@@ -1093,7 +1093,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t dest, v
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_tumu(
@@ -1102,7 +1102,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t dest, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_tumu(
@@ -1111,7 +1111,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_tumu(
@@ -1120,7 +1120,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tumu(
@@ -1129,7 +1129,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_tumu(
@@ -1138,7 +1138,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t dest, v
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_tumu(
@@ -1147,7 +1147,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t dest, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_tumu(
@@ -1156,7 +1156,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t dest, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_tumu(
@@ -1165,7 +1165,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_tumu(
@@ -1174,7 +1174,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_tumu(
@@ -1183,7 +1183,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t dest, vflo
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_tumu(
@@ -1192,7 +1192,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t dest, vflo
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_tumu(
@@ -1201,7 +1201,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t dest, vflo
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_tumu(
@@ -1210,7 +1210,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t dest, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_tumu(
@@ -1219,7 +1219,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t dest, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_tumu(
@@ -1228,7 +1228,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t dest, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_tumu(
@@ -1237,7 +1237,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t dest, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_tumu(
@@ -1246,7 +1246,7 @@ vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t mask, vint8m1_t dest, vint8m1_t sr
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_tumu(
@@ -1255,7 +1255,7 @@ vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t mask, vint8m2_t dest, vint8m2_t sr
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_tumu(
@@ -1264,7 +1264,7 @@ vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t mask, vint8m4_t dest, vint8m4_t sr
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_tumu(
@@ -1273,7 +1273,7 @@ vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t mask, vint8m8_t dest, vint8m8_t sr
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_tumu(
@@ -1282,7 +1282,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t dest, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_tumu(
@@ -1291,7 +1291,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t dest, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_tumu(
@@ -1300,7 +1300,7 @@ vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t mask, vint16m1_t dest, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_tumu(
@@ -1309,7 +1309,7 @@ vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t mask, vint16m2_t dest, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_tumu(
@@ -1318,7 +1318,7 @@ vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t mask, vint16m4_t dest, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tumu(
@@ -1327,7 +1327,7 @@ vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t mask, vint16m8_t dest, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_tumu(
@@ -1336,7 +1336,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t dest, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_tumu(
@@ -1345,7 +1345,7 @@ vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t mask, vint32m1_t dest, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_tumu(
@@ -1354,7 +1354,7 @@ vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t mask, vint32m2_t dest, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_tumu(
@@ -1363,7 +1363,7 @@ vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t mask, vint32m4_t dest, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_tumu(
@@ -1372,7 +1372,7 @@ vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t mask, vint32m8_t dest, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_tumu(
@@ -1381,7 +1381,7 @@ vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t mask, vint64m1_t dest, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_tumu(
@@ -1390,7 +1390,7 @@ vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t mask, vint64m2_t dest, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_tumu(
@@ -1399,7 +1399,7 @@ vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t mask, vint64m4_t dest, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_tumu(
@@ -1408,7 +1408,7 @@ vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t mask, vint64m8_t dest, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_tumu(
@@ -1417,7 +1417,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t dest, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_tumu(
@@ -1426,7 +1426,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t dest, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_tumu(
@@ -1435,7 +1435,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t dest, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_tumu(
@@ -1444,7 +1444,7 @@ vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_tumu(
@@ -1453,7 +1453,7 @@ vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_tumu(
@@ -1462,7 +1462,7 @@ vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_tumu(
@@ -1471,7 +1471,7 @@ vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_tumu(
@@ -1480,7 +1480,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t dest, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_tumu(
@@ -1489,7 +1489,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t dest, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_tumu(
@@ -1498,7 +1498,7 @@ vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t dest, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_tumu(
@@ -1507,7 +1507,7 @@ vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t dest, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_tumu(
@@ -1516,7 +1516,7 @@ vuint16m4_t test_vslideup_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t dest, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tumu(
@@ -1525,7 +1525,7 @@ vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t dest, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_tumu(
@@ -1534,7 +1534,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t dest, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_tumu(
@@ -1543,7 +1543,7 @@ vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t dest, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_tumu(
@@ -1552,7 +1552,7 @@ vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t dest, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_tumu(
@@ -1561,7 +1561,7 @@ vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t dest, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_tumu(
@@ -1570,7 +1570,7 @@ vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t dest, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_tumu(
@@ -1579,7 +1579,7 @@ vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t dest, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_tumu(
@@ -1588,7 +1588,7 @@ vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t dest, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_tumu(
@@ -1597,7 +1597,7 @@ vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t dest, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
- return vslideup_tumu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_tumu(mask, dest, src, offset, vl);
}
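The final group renames the `_mu` overloads: masked-off elements are undisturbed while the tail is agnostic. A last sketch, again illustrative rather than taken from the patch:

#include <riscv_vector.h>

// Sketch: the prefixed _mu overload. Masked-off elements keep `dest`;
// tail elements (index >= vl) are agnostic, so their contents are
// unspecified after the call.
vfloat32m1_t slideup_mu_example(vbool32_t mask, vfloat32m1_t dest,
                                vfloat32m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}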
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_mu(
@@ -1606,7 +1606,7 @@ vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t dest, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_mu(
@@ -1615,7 +1615,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t dest, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslideup_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_mu(
@@ -1624,7 +1624,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t dest, vfl
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_mu(
@@ -1633,7 +1633,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_mu(
@@ -1642,7 +1642,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t dest, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_mu(
@@ -1651,7 +1651,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t dest, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_mu(
@@ -1660,7 +1660,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t dest, vfloat1
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_mu(
@@ -1669,7 +1669,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t dest, vfl
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_mu(
@@ -1678,7 +1678,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_mu(
@@ -1687,7 +1687,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_mu(
@@ -1696,7 +1696,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t dest, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_mu(
@@ -1705,7 +1705,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t dest, vfloat3
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_mu(
@@ -1714,7 +1714,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_mu(
@@ -1723,7 +1723,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_mu(
@@ -1732,7 +1732,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t dest, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_mu(
@@ -1741,7 +1741,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t dest, vfloat6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_mu(
@@ -1750,7 +1750,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_mu(
@@ -1759,7 +1759,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_mu(
@@ -1768,7 +1768,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_mu(
@@ -1777,7 +1777,7 @@ vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t mask, vint8m1_t dest, vint8m1_t src,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_mu(
@@ -1786,7 +1786,7 @@ vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t mask, vint8m2_t dest, vint8m2_t src,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_mu(
@@ -1795,7 +1795,7 @@ vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t mask, vint8m4_t dest, vint8m4_t src,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_mu(
@@ -1804,7 +1804,7 @@ vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t mask, vint8m8_t dest, vint8m8_t src,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_mu(
@@ -1813,7 +1813,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t dest, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_mu(
@@ -1822,7 +1822,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t dest, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_mu(
@@ -1831,7 +1831,7 @@ vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t mask, vint16m1_t dest, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_mu(
@@ -1840,7 +1840,7 @@ vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t mask, vint16m2_t dest, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_mu(
@@ -1849,7 +1849,7 @@ vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t mask, vint16m4_t dest, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_mu(
@@ -1858,7 +1858,7 @@ vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t mask, vint16m8_t dest, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_mu(
@@ -1867,7 +1867,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t dest, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_mu(
@@ -1876,7 +1876,7 @@ vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t mask, vint32m1_t dest, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_mu(
@@ -1885,7 +1885,7 @@ vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t mask, vint32m2_t dest, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4_mu(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_mu(
@@ -1894,7 +1894,7 @@ vint32m4_t test_vslideup_vx_i32m4_mu(vbool8_t mask, vint32m4_t dest, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_mu(
@@ -1903,7 +1903,7 @@ vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t mask, vint32m8_t dest, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_mu(
@@ -1912,7 +1912,7 @@ vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t mask, vint64m1_t dest, vint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_mu(
@@ -1921,7 +1921,7 @@ vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t mask, vint64m2_t dest, vint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4_mu(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_mu(
@@ -1930,7 +1930,7 @@ vint64m4_t test_vslideup_vx_i64m4_mu(vbool16_t mask, vint64m4_t dest, vint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_mu(
@@ -1939,7 +1939,7 @@ vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t mask, vint64m8_t dest, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_mu(
@@ -1948,7 +1948,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t dest, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_mu(
@@ -1957,7 +1957,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t dest, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_mu(
@@ -1966,7 +1966,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t dest, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_mu(
@@ -1975,7 +1975,7 @@ vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2_mu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_mu(
@@ -1984,7 +1984,7 @@ vuint8m2_t test_vslideup_vx_u8m2_mu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_mu(
@@ -1993,7 +1993,7 @@ vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_mu(
@@ -2002,7 +2002,7 @@ vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_mu(
@@ -2011,7 +2011,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t dest, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_mu(
@@ -2020,7 +2020,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t dest, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_mu(
@@ -2029,7 +2029,7 @@ vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t mask, vuint16m1_t dest, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_mu(
@@ -2038,7 +2038,7 @@ vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t mask, vuint16m2_t dest, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_mu(
@@ -2047,7 +2047,7 @@ vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t mask, vuint16m4_t dest, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_mu(
@@ -2056,7 +2056,7 @@ vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t mask, vuint16m8_t dest, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_mu(
@@ -2065,7 +2065,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t dest, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_mu(
@@ -2074,7 +2074,7 @@ vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t mask, vuint32m1_t dest, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_mu(
@@ -2083,7 +2083,7 @@ vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t mask, vuint32m2_t dest, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_mu(
@@ -2092,7 +2092,7 @@ vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t mask, vuint32m4_t dest, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_mu(
@@ -2101,7 +2101,7 @@ vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t mask, vuint32m8_t dest, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_mu(
@@ -2110,7 +2110,7 @@ vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t mask, vuint64m1_t dest, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_mu(
@@ -2119,7 +2119,7 @@ vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t mask, vuint64m2_t dest, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_mu(
@@ -2128,6 +2128,6 @@ vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t mask, vuint64m4_t dest, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8_mu(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
- return vslideup_mu(mask, dest, src, offset, vl);
+ return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}
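For readers skimming this mechanical rename, a minimal sketch of the migration (a hypothetical caller, not part of this patch; the signature mirrors the i32m1 `_mu` test above): only the `__riscv_` prefix changes, while the overloaded argument list stays identical.

#include <riscv_vector.h>

// Hypothetical user code: mask-undisturbed slide-up, the same shape
// exercised by test_vslideup_vx_i32m1_mu in the diff above.
vint32m1_t slide_mu_example(vbool32_t mask, vint32m1_t dest,
                            vint32m1_t src, size_t offset, size_t vl) {
  // Before this patch: return vslideup_mu(mask, dest, src, offset, vl);
  return __riscv_vslideup_mu(mask, dest, src, offset, vl);
}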
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsll.c
index 5902fe7c828d..3a89414ef7f0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsll.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsll_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsll_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t sh
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t sh
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_tu(
@@ -408,7 +408,7 @@ vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_tu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_tu(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_tu(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_tu(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_tu(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_tu(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_tu(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_tu(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_tu(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_tu(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_tu(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_tu(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_tu(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_tu(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_tu(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_tu(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_tu(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_tu(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_tu(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_tu(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_tu(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_tu(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_tu(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_tu(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_tu(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tu(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tu(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_tu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_tu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_tu(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_tu(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_tu(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_tu(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_tu(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_tu(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_tu(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_tu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_tu(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_tu(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_tu(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_tu(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_tu(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_tu(
@@ -795,7 +795,7 @@ vuint64m8_t test_vsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsll_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}
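// A minimal caller sketch (separate from the autogenerated checks above),
// assuming a target with the V extension (e.g. -march=rv64gcv) and the
// renamed headers from this patch series; the helper name
// `shift_left_u32m1_tu` is hypothetical, not part of the test file.
#include <riscv_vector.h>

vuint32m1_t shift_left_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1,
                                size_t shift, size_t vl) {
  // `_tu` (tail undisturbed): elements past vl are taken from `maskedoff`.
  // The overload resolves on the operand types (vector-vector vs
  // vector-scalar); only the `__riscv_` prefix is new in this patch.
  return __riscv_vsll_tu(maskedoff, op1, shift, vl);
}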
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_tum(
@@ -804,7 +804,7 @@ vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_tum(
@@ -813,7 +813,7 @@ vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_tum(
@@ -822,7 +822,7 @@ vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_tum(
@@ -831,7 +831,7 @@ vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_tum(
@@ -840,7 +840,7 @@ vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_tum(
@@ -849,7 +849,7 @@ vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_tum(
@@ -858,7 +858,7 @@ vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_tum(
@@ -867,7 +867,7 @@ vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_tum(
@@ -876,7 +876,7 @@ vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_tum(
@@ -885,7 +885,7 @@ vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_tum(
@@ -894,7 +894,7 @@ vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_tum(
@@ -903,7 +903,7 @@ vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_tum(
@@ -912,7 +912,7 @@ vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_tum(
@@ -921,7 +921,7 @@ vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_tum(
@@ -930,7 +930,7 @@ vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_tum(
@@ -939,7 +939,7 @@ vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_tum(
@@ -948,7 +948,7 @@ vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_tum(
@@ -957,7 +957,7 @@ vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_tum(
@@ -966,7 +966,7 @@ vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_tum(
@@ -975,7 +975,7 @@ vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_tum(
@@ -984,7 +984,7 @@ vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_tum(
@@ -993,7 +993,7 @@ vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_tum(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_tum(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_tum(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_tum(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tum(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tum(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_tum(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_tum(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_tum(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_tum(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_tum(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_tum(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_tum(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_tum(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_tum(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_tum(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_tum(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_tum(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_tum(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_tum(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vsll_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_tum(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_tum(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_tum(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_tum(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_tum(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_tum(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_tum(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_tum(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_tum(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_tum(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_tum(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_tum(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_tum(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_tum(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_tum(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_tum(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_tum(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_tum(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_tum(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_tum(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_tum(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_tum(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_tum(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_tum(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_tum(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_tum(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_tum(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_tum(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tum(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tum(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_tum(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_tum(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_tum(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_tum(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_tum(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_tum(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_tum(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_tum(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_tum(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_tum(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_tum(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_tum(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_tum(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_tum(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_tum(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_tum(
@@ -1587,7 +1587,7 @@ vuint64m8_t test_vsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsll_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}
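// A masked counterpart sketch (again separate from the checks), under the
// same assumptions; `shift_left_i8m1_tum` is a hypothetical helper name.
// As I read the policy naming in rvv-intrinsic-doc, `_tum` is
// tail-undisturbed with a mask-agnostic body, while the `_tumu` overloads
// tested below additionally keep masked-off elements from `maskedoff`.
#include <riscv_vector.h>

vint8m1_t shift_left_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff,
                              vint8m1_t op1, vuint8m1_t shift, size_t vl) {
  // Active elements are shifted; tail elements come from `maskedoff`.
  return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl);
}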
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_tumu(
@@ -1596,7 +1596,7 @@ vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_tumu(
@@ -1605,7 +1605,7 @@ vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_tumu(
@@ -1614,7 +1614,7 @@ vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_tumu(
@@ -1623,7 +1623,7 @@ vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_tumu(
@@ -1632,7 +1632,7 @@ vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_tumu(
@@ -1641,7 +1641,7 @@ vint8mf2_t test_vsll_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_tumu(
@@ -1650,7 +1650,7 @@ vint8mf2_t test_vsll_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_tumu(
@@ -1659,7 +1659,7 @@ vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_tumu(
@@ -1668,7 +1668,7 @@ vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_tumu(
@@ -1677,7 +1677,7 @@ vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_tumu(
@@ -1686,7 +1686,7 @@ vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_tumu(
@@ -1695,7 +1695,7 @@ vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_tumu(
@@ -1704,7 +1704,7 @@ vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_tumu(
@@ -1713,7 +1713,7 @@ vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_tumu(
@@ -1722,7 +1722,7 @@ vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_tumu(
@@ -1731,7 +1731,7 @@ vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_tumu(
@@ -1740,7 +1740,7 @@ vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_tumu(
@@ -1749,7 +1749,7 @@ vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_tumu(
@@ -1758,7 +1758,7 @@ vint16mf2_t test_vsll_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_tumu(
@@ -1767,7 +1767,7 @@ vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_tumu(
@@ -1776,7 +1776,7 @@ vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_tumu(
@@ -1785,7 +1785,7 @@ vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_tumu(
@@ -1794,7 +1794,7 @@ vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_tumu(
@@ -1803,7 +1803,7 @@ vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_tumu(
@@ -1812,7 +1812,7 @@ vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_tumu(
@@ -1821,7 +1821,7 @@ vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tumu(
@@ -1830,7 +1830,7 @@ vint16m8_t test_vsll_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tumu(
@@ -1839,7 +1839,7 @@ vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_tumu(
@@ -1848,7 +1848,7 @@ vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_tumu(
@@ -1857,7 +1857,7 @@ vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_tumu(
@@ -1866,7 +1866,7 @@ vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_tumu(
@@ -1875,7 +1875,7 @@ vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_tumu(
@@ -1884,7 +1884,7 @@ vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_tumu(
@@ -1893,7 +1893,7 @@ vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_tumu(
@@ -1902,7 +1902,7 @@ vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_tumu(
@@ -1911,7 +1911,7 @@ vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_tumu(
@@ -1920,7 +1920,7 @@ vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_tumu(
@@ -1929,7 +1929,7 @@ vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_tumu(
@@ -1938,7 +1938,7 @@ vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_tumu(
@@ -1947,7 +1947,7 @@ vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_tumu(
@@ -1956,7 +1956,7 @@ vint64m2_t test_vsll_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_tumu(
@@ -1965,7 +1965,7 @@ vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_tumu(
@@ -1974,7 +1974,7 @@ vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_tumu(
@@ -1983,7 +1983,7 @@ vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_tumu(
@@ -1992,7 +1992,7 @@ vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_tumu(
@@ -2001,7 +2001,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_tumu(
@@ -2010,7 +2010,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_tumu(
@@ -2019,7 +2019,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_tumu(
@@ -2028,7 +2028,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_tumu(
@@ -2037,7 +2037,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_tumu(
@@ -2046,7 +2046,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_tumu(
@@ -2055,7 +2055,7 @@ vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_tumu(
@@ -2064,7 +2064,7 @@ vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_tumu(
@@ -2073,7 +2073,7 @@ vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_tumu(
@@ -2082,7 +2082,7 @@ vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_tumu(
@@ -2091,7 +2091,7 @@ vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_tumu(
@@ -2100,7 +2100,7 @@ vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_tumu(
@@ -2109,7 +2109,7 @@ vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_tumu(
@@ -2118,7 +2118,7 @@ vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_tumu(
@@ -2127,7 +2127,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_tumu(
@@ -2136,7 +2136,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_tumu(
@@ -2145,7 +2145,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_tumu(
@@ -2154,7 +2154,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_tumu(
@@ -2163,7 +2163,7 @@ vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_tumu(
@@ -2172,7 +2172,7 @@ vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_tumu(
@@ -2181,7 +2181,7 @@ vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_tumu(
@@ -2190,7 +2190,7 @@ vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_tumu(
@@ -2199,7 +2199,7 @@ vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_tumu(
@@ -2208,7 +2208,7 @@ vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_tumu(
@@ -2217,7 +2217,7 @@ vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tumu(
@@ -2226,7 +2226,7 @@ vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tumu(
@@ -2235,7 +2235,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_tumu(
@@ -2244,7 +2244,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_tumu(
@@ -2253,7 +2253,7 @@ vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_tumu(
@@ -2262,7 +2262,7 @@ vuint32m1_t test_vsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_tumu(
@@ -2271,7 +2271,7 @@ vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_tumu(
@@ -2280,7 +2280,7 @@ vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_tumu(
@@ -2289,7 +2289,7 @@ vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_tumu(
@@ -2298,7 +2298,7 @@ vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_tumu(
@@ -2307,7 +2307,7 @@ vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_tumu(
@@ -2316,7 +2316,7 @@ vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_tumu(
@@ -2325,7 +2325,7 @@ vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_tumu(
@@ -2334,7 +2334,7 @@ vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_tumu(
@@ -2343,7 +2343,7 @@ vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_tumu(
@@ -2352,7 +2352,7 @@ vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_tumu(
@@ -2361,7 +2361,7 @@ vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_tumu(
@@ -2370,7 +2370,7 @@ vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_tumu(
@@ -2379,7 +2379,7 @@ vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsll_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_mu(
@@ -2388,7 +2388,7 @@ vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_mu(
@@ -2397,7 +2397,7 @@ vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_mu(
@@ -2406,7 +2406,7 @@ vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_mu(
@@ -2415,7 +2415,7 @@ vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_mu(
@@ -2424,7 +2424,7 @@ vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_mu(
@@ -2433,7 +2433,7 @@ vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_mu(
@@ -2442,7 +2442,7 @@ vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_mu(
@@ -2451,7 +2451,7 @@ vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_mu(
@@ -2460,7 +2460,7 @@ vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_mu(
@@ -2469,7 +2469,7 @@ vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_mu(
@@ -2478,7 +2478,7 @@ vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_mu(
@@ -2487,7 +2487,7 @@ vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_mu(
@@ -2496,7 +2496,7 @@ vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_mu(
@@ -2505,7 +2505,7 @@ vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_mu(
@@ -2514,7 +2514,7 @@ vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_mu(
@@ -2523,7 +2523,7 @@ vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsll_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_mu(
@@ -2532,7 +2532,7 @@ vint16mf4_t test_vsll_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_mu(
@@ -2541,7 +2541,7 @@ vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_mu(
@@ -2550,7 +2550,7 @@ vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_mu(
@@ -2559,7 +2559,7 @@ vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_mu(
@@ -2568,7 +2568,7 @@ vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_mu(
@@ -2577,7 +2577,7 @@ vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_mu(
@@ -2586,7 +2586,7 @@ vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_mu(
@@ -2595,7 +2595,7 @@ vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_mu(
@@ -2604,7 +2604,7 @@ vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_mu(
@@ -2613,7 +2613,7 @@ vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsll_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_mu(
@@ -2622,7 +2622,7 @@ vint16m8_t test_vsll_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_mu(
@@ -2631,7 +2631,7 @@ vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_mu(
@@ -2640,7 +2640,7 @@ vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_mu(
@@ -2649,7 +2649,7 @@ vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_mu(
@@ -2658,7 +2658,7 @@ vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_mu(
@@ -2667,7 +2667,7 @@ vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsll_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_mu(
@@ -2676,7 +2676,7 @@ vint32m2_t test_vsll_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_mu(
@@ -2685,7 +2685,7 @@ vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_mu(
@@ -2694,7 +2694,7 @@ vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_mu(
@@ -2703,7 +2703,7 @@ vint32m8_t test_vsll_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_mu(
@@ -2712,7 +2712,7 @@ vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_mu(
@@ -2721,7 +2721,7 @@ vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_mu(
@@ -2730,7 +2730,7 @@ vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_mu(
@@ -2739,7 +2739,7 @@ vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_mu(
@@ -2748,7 +2748,7 @@ vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_mu(
@@ -2757,7 +2757,7 @@ vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_mu(
@@ -2766,7 +2766,7 @@ vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_mu(
@@ -2775,7 +2775,7 @@ vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_mu(
@@ -2784,7 +2784,7 @@ vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_mu(
@@ -2793,7 +2793,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_mu(
@@ -2802,7 +2802,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_mu(
@@ -2811,7 +2811,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_mu(
@@ -2820,7 +2820,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_mu(
@@ -2829,7 +2829,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_mu(
@@ -2838,7 +2838,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_mu(
@@ -2847,7 +2847,7 @@ vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_mu(
@@ -2856,7 +2856,7 @@ vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_mu(
@@ -2865,7 +2865,7 @@ vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_mu(
@@ -2874,7 +2874,7 @@ vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_mu(
@@ -2883,7 +2883,7 @@ vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_mu(
@@ -2892,7 +2892,7 @@ vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_mu(
@@ -2901,7 +2901,7 @@ vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_mu(
@@ -2910,7 +2910,7 @@ vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_mu(
@@ -2919,7 +2919,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_mu(
@@ -2928,7 +2928,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_mu(
@@ -2937,7 +2937,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_mu(
@@ -2946,7 +2946,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_mu(
@@ -2955,7 +2955,7 @@ vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_mu(
@@ -2964,7 +2964,7 @@ vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_mu(
@@ -2973,7 +2973,7 @@ vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_mu(
@@ -2982,7 +2982,7 @@ vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_mu(
@@ -2991,7 +2991,7 @@ vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_mu(
@@ -3000,7 +3000,7 @@ vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_mu(
@@ -3009,7 +3009,7 @@ vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_mu(
@@ -3018,7 +3018,7 @@ vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_mu(
@@ -3027,7 +3027,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_mu(
@@ -3036,7 +3036,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_mu(
@@ -3045,7 +3045,7 @@ vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_mu(
@@ -3054,7 +3054,7 @@ vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_mu(
@@ -3063,7 +3063,7 @@ vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_mu(
@@ -3072,7 +3072,7 @@ vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_mu(
@@ -3081,7 +3081,7 @@ vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_mu(
@@ -3090,7 +3090,7 @@ vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_mu(
@@ -3099,7 +3099,7 @@ vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_mu(
@@ -3108,7 +3108,7 @@ vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_mu(
@@ -3117,7 +3117,7 @@ vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_mu(
@@ -3126,7 +3126,7 @@ vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_mu(
@@ -3135,7 +3135,7 @@ vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_mu(
@@ -3144,7 +3144,7 @@ vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_mu(
@@ -3153,7 +3153,7 @@ vuint64m4_t test_vsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_mu(
@@ -3162,7 +3162,7 @@ vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_mu(
@@ -3171,6 +3171,6 @@ vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsll_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
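
A minimal sketch (not from the patch itself) of what this rename means for user code, assuming a toolchain that ships <riscv_vector.h> and compiles with the V extension enabled (e.g. -march=rv64gcv); the function name shift_left_masked is hypothetical:

// Sketch only: mirrors the autogenerated tests above. Before this patch the
// overloaded masked shift was spelled vsll_mu(...); after it, the same
// overload carries the __riscv_ prefix mandated by riscv-c-api-doc.
#include <riscv_vector.h>

vuint32m1_t shift_left_masked(vbool32_t mask, vuint32m1_t maskedoff,
                              vuint32m1_t op1, size_t shift, size_t vl) {
  // Overload resolution on the operand types still picks the vx form;
  // only the spelling of the intrinsic changes.
  return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
}
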
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsmul.c
index e9c47d38dd66..a80a9a24e5a9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsmul.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsra.c
index 4ba841d0aae6..a8b5474778ca 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsra.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vsra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t sh
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t sh
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vsra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vsra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vsra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vsra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vsra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vsra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vsra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vsra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vsra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vsra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vsra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vsra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vsra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vsra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vsra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vsra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsrl.c
index 5f78ad85cbcb..98f0424764c2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsrl.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vsrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vsrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vsrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vsrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vsrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vsrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vsrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vsrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vsrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}
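The hunks above complete the vsrl.c updates: every overloaded call site drops the bare vsrl_* spelling in favor of the __riscv_-prefixed one, while the CHECK-RV64 lines are untouched because only the C-level name changes, not the generated IR. As a minimal sketch of what the rewritten tests exercise (assuming a toolchain whose riscv_vector.h exposes the prefixed overloads; the wrapper name here is illustrative, not from the patch):

#include <riscv_vector.h>

// Masked logical shift-right with the mask-undisturbed (_mu) policy,
// spelled with the overloaded __riscv_ prefix these tests now use.
vuint32m4_t srl_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff,
                         vuint32m4_t op1, size_t shift, size_t vl) {
  return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
}

The same mechanical rename continues file by file below, starting with the policy-variant vssra.c tests.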
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssra.c
index 4a88902d7264..b986d6d3b983 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssra.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t s
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssrl.c
index d3437f9e9374..0cf5cc3c9421 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssrl.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t sh
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssub.c
index 7d8604f353be..46aa744bdc91 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vssub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssub_tu(maskedoff, op1, op2, vl);
}
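// --- Editorial sketch (not part of the autogenerated file) ----------------
// The hunks above rename the unmasked tail-undisturbed (_tu) overloads of
// the signed saturating subtract. A minimal caller under the new spelling,
// assuming <riscv_vector.h> on a V-enabled target; the helper name
// saturating_sub_i64m1_tu is hypothetical:
#include <riscv_vector.h>

vint64m1_t saturating_sub_i64m1_tu(vint64m1_t dest, vint64m1_t op1,
                                   vint64m1_t op2, size_t vl) {
  // Formerly spelled vssub_tu(...); only the __riscv_ prefix changes.
  return __riscv_vssub_tu(dest, op1, op2, vl);
}
// The hunks below apply the same rename to the masked tail-undisturbed
// (_tum) overloads, which take a leading vboolN_t mask operand.
// ---------------------------------------------------------------------------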
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_tum(
@@ -408,7 +408,7 @@ vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_tum(
@@ -417,7 +417,7 @@ vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_tum(
@@ -426,7 +426,7 @@ vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_tum(
@@ -435,7 +435,7 @@ vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_tum(
@@ -444,7 +444,7 @@ vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_tum(
@@ -453,7 +453,7 @@ vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_tum(
@@ -462,7 +462,7 @@ vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_tum(
@@ -471,7 +471,7 @@ vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_tum(
@@ -480,7 +480,7 @@ vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_tum(
@@ -489,7 +489,7 @@ vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_tum(
@@ -498,7 +498,7 @@ vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_tum(
@@ -507,7 +507,7 @@ vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_tum(
@@ -516,7 +516,7 @@ vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_tum(
@@ -525,7 +525,7 @@ vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_tum(
@@ -534,7 +534,7 @@ vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_tum(
@@ -543,7 +543,7 @@ vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_tum(
@@ -552,7 +552,7 @@ vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_tum(
@@ -561,7 +561,7 @@ vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_tum(
@@ -570,7 +570,7 @@ vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_tum(
@@ -579,7 +579,7 @@ vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_tum(
@@ -588,7 +588,7 @@ vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_tum(
@@ -597,7 +597,7 @@ vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_tum(
@@ -606,7 +606,7 @@ vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_tum(
@@ -615,7 +615,7 @@ vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_tum(
@@ -624,7 +624,7 @@ vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_tum(
@@ -633,7 +633,7 @@ vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tum(
@@ -642,7 +642,7 @@ vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tum(
@@ -651,7 +651,7 @@ vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_tum(
@@ -660,7 +660,7 @@ vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_tum(
@@ -669,7 +669,7 @@ vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_tum(
@@ -678,7 +678,7 @@ vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_tum(
@@ -687,7 +687,7 @@ vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_tum(
@@ -696,7 +696,7 @@ vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_tum(
@@ -705,7 +705,7 @@ vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_tum(
@@ -714,7 +714,7 @@ vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_tum(
@@ -723,7 +723,7 @@ vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_tum(
@@ -732,7 +732,7 @@ vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_tum(
@@ -741,7 +741,7 @@ vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_tum(
@@ -750,7 +750,7 @@ vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_tum(
@@ -759,7 +759,7 @@ vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_tum(
@@ -768,7 +768,7 @@ vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_tum(
@@ -777,7 +777,7 @@ vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_tum(
@@ -786,7 +786,7 @@ vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_tum(
@@ -795,7 +795,7 @@ vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vssub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl);
}
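// --- Editorial sketch (not part of the autogenerated file) ----------------
// The _tum overloads renamed above are the masked, tail-undisturbed policy
// variants. A hedged example, assuming <riscv_vector.h>; the helper name is
// hypothetical:
#include <riscv_vector.h>

vint8m1_t masked_saturating_sub_i8m1_tum(vbool8_t mask, vint8m1_t dest,
                                         vint8m1_t op1, vint8m1_t op2,
                                         size_t vl) {
  // Tail elements keep their values from dest; the overload itself is
  // unchanged apart from the __riscv_ prefix.
  return __riscv_vssub_tum(mask, dest, op1, op2, vl);
}
// ---------------------------------------------------------------------------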
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_tumu(
@@ -804,7 +804,7 @@ vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_tumu(
@@ -813,7 +813,7 @@ vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_tumu(
@@ -822,7 +822,7 @@ vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_tumu(
@@ -831,7 +831,7 @@ vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_tumu(
@@ -840,7 +840,7 @@ vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_tumu(
@@ -849,7 +849,7 @@ vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_tumu(
@@ -858,7 +858,7 @@ vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_tumu(
@@ -867,7 +867,7 @@ vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_tumu(
@@ -876,7 +876,7 @@ vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_tumu(
@@ -885,7 +885,7 @@ vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_tumu(
@@ -894,7 +894,7 @@ vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_tumu(
@@ -903,7 +903,7 @@ vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_tumu(
@@ -912,7 +912,7 @@ vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_tumu(
@@ -921,7 +921,7 @@ vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_tumu(
@@ -930,7 +930,7 @@ vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_tumu(
@@ -939,7 +939,7 @@ vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_tumu(
@@ -948,7 +948,7 @@ vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_tumu(
@@ -957,7 +957,7 @@ vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_tumu(
@@ -966,7 +966,7 @@ vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_tumu(
@@ -975,7 +975,7 @@ vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_tumu(
@@ -984,7 +984,7 @@ vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_tumu(
@@ -993,7 +993,7 @@ vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_tumu(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_tumu(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_tumu(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_tumu(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tumu(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tumu(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_tumu(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_tumu(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_tumu(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_tumu(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_tumu(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_tumu(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_tumu(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_tumu(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_tumu(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_tumu(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_tumu(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_tumu(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_tumu(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_tumu(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_tumu(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_tumu(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vssub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl);
}
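// --- Editorial sketch (not part of the autogenerated file) ----------------
// The _tumu overloads renamed above keep both tail and masked-off elements
// undisturbed, while the _mu overloads renamed below keep only masked-off
// elements. A hedged example, assuming <riscv_vector.h>; the helper name is
// hypothetical:
#include <riscv_vector.h>

vint16m1_t masked_saturating_sub_i16m1_tumu(vbool16_t mask, vint16m1_t dest,
                                            vint16m1_t op1, vint16m1_t op2,
                                            size_t vl) {
  return __riscv_vssub_tumu(mask, dest, op1, op2, vl);
}
// ---------------------------------------------------------------------------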
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_mu(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_mu(
@@ -1209,7 +1209,7 @@ vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_mu(
@@ -1218,7 +1218,7 @@ vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_mu(
@@ -1227,7 +1227,7 @@ vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_mu(
@@ -1236,7 +1236,7 @@ vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_mu(
@@ -1245,7 +1245,7 @@ vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_mu(
@@ -1254,7 +1254,7 @@ vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_mu(
@@ -1263,7 +1263,7 @@ vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_mu(
@@ -1272,7 +1272,7 @@ vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_mu(
@@ -1281,7 +1281,7 @@ vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_mu(
@@ -1290,7 +1290,7 @@ vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_mu(
@@ -1299,7 +1299,7 @@ vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_mu(
@@ -1308,7 +1308,7 @@ vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_mu(
@@ -1317,7 +1317,7 @@ vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_mu(
@@ -1326,7 +1326,7 @@ vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_mu(
@@ -1335,7 +1335,7 @@ vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_mu(
@@ -1344,7 +1344,7 @@ vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_mu(
@@ -1353,7 +1353,7 @@ vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_mu(
@@ -1362,7 +1362,7 @@ vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_mu(
@@ -1371,7 +1371,7 @@ vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_mu(
@@ -1380,7 +1380,7 @@ vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_mu(
@@ -1389,7 +1389,7 @@ vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_mu(
@@ -1398,7 +1398,7 @@ vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_mu(
@@ -1407,7 +1407,7 @@ vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_mu(
@@ -1416,7 +1416,7 @@ vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_mu(
@@ -1425,7 +1425,7 @@ vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_mu(
@@ -1434,7 +1434,7 @@ vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_mu(
@@ -1443,7 +1443,7 @@ vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_mu(
@@ -1452,7 +1452,7 @@ vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_mu(
@@ -1461,7 +1461,7 @@ vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_mu(
@@ -1470,7 +1470,7 @@ vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_mu(
@@ -1479,7 +1479,7 @@ vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_mu(
@@ -1488,7 +1488,7 @@ vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_mu(
@@ -1497,7 +1497,7 @@ vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_mu(
@@ -1506,7 +1506,7 @@ vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_mu(
@@ -1515,7 +1515,7 @@ vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_mu(
@@ -1524,7 +1524,7 @@ vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_mu(
@@ -1533,7 +1533,7 @@ vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_mu(
@@ -1542,7 +1542,7 @@ vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_mu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_mu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_mu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_mu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_mu(
@@ -1587,6 +1587,6 @@ vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vssub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssubu.c
index 8ff3b333f79b..d60ff55ef6a1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssubu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vssubu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_tu(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_tu(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_tu(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_tu(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_tu(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_tu(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_tu(
@@ -75,7 +75,7 @@ vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_tu(
@@ -84,7 +84,7 @@ vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_tu(
@@ -93,7 +93,7 @@ vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_tu(
@@ -102,7 +102,7 @@ vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_tu(
@@ -111,7 +111,7 @@ vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_tu(
@@ -120,7 +120,7 @@ vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_tu(
@@ -129,7 +129,7 @@ vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_tu(
@@ -138,7 +138,7 @@ vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_tu(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_tu(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_tu(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_tu(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_tu(
@@ -183,7 +183,7 @@ vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_tu(
@@ -192,7 +192,7 @@ vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_tu(
@@ -201,7 +201,7 @@ vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_tu(
@@ -210,7 +210,7 @@ vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_tu(
@@ -219,7 +219,7 @@ vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_tu(
@@ -228,7 +228,7 @@ vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_tu(
@@ -237,7 +237,7 @@ vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_tu(
@@ -291,7 +291,7 @@ vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_tu(
@@ -300,7 +300,7 @@ vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_tu(
@@ -309,7 +309,7 @@ vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_tu(
@@ -318,7 +318,7 @@ vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_tu(
@@ -327,7 +327,7 @@ vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_tu(
@@ -336,7 +336,7 @@ vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_tu(
@@ -345,7 +345,7 @@ vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_tu(
@@ -354,7 +354,7 @@ vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_tu(
@@ -363,7 +363,7 @@ vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_tu(
@@ -372,7 +372,7 @@ vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_tu(
@@ -381,7 +381,7 @@ vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_tu(
@@ -390,7 +390,7 @@ vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_tu(
@@ -399,7 +399,7 @@ vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vssubu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_tum(
@@ -408,7 +408,7 @@ vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_tum(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_tum(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_tum(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_tum(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_tum(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_tum(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_tum(
@@ -471,7 +471,7 @@ vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_tum(
@@ -480,7 +480,7 @@ vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_tum(
@@ -489,7 +489,7 @@ vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_tum(
@@ -498,7 +498,7 @@ vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_tum(
@@ -507,7 +507,7 @@ vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_tum(
@@ -516,7 +516,7 @@ vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_tum(
@@ -525,7 +525,7 @@ vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_tum(
@@ -534,7 +534,7 @@ vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_tum(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_tum(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_tum(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_tum(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_tum(
@@ -579,7 +579,7 @@ vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_tum(
@@ -588,7 +588,7 @@ vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_tum(
@@ -597,7 +597,7 @@ vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_tum(
@@ -606,7 +606,7 @@ vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_tum(
@@ -615,7 +615,7 @@ vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_tum(
@@ -624,7 +624,7 @@ vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_tum(
@@ -633,7 +633,7 @@ vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tum(
@@ -642,7 +642,7 @@ vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tum(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_tum(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_tum(
@@ -669,7 +669,7 @@ vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_tum(
@@ -678,7 +678,7 @@ vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_tum(
@@ -687,7 +687,7 @@ vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_tum(
@@ -696,7 +696,7 @@ vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_tum(
@@ -705,7 +705,7 @@ vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_tum(
@@ -714,7 +714,7 @@ vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_tum(
@@ -723,7 +723,7 @@ vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_tum(
@@ -732,7 +732,7 @@ vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_tum(
@@ -741,7 +741,7 @@ vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_tum(
@@ -750,7 +750,7 @@ vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_tum(
@@ -759,7 +759,7 @@ vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_tum(
@@ -768,7 +768,7 @@ vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_tum(
@@ -777,7 +777,7 @@ vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_tum(
@@ -786,7 +786,7 @@ vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_tum(
@@ -795,7 +795,7 @@ vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vssubu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_tumu(
@@ -804,7 +804,7 @@ vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_tumu(
@@ -813,7 +813,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_tumu(
@@ -822,7 +822,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_tumu(
@@ -831,7 +831,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_tumu(
@@ -840,7 +840,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_tumu(
@@ -849,7 +849,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_tumu(
@@ -858,7 +858,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_tumu(
@@ -867,7 +867,7 @@ vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_tumu(
@@ -876,7 +876,7 @@ vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_tumu(
@@ -885,7 +885,7 @@ vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_tumu(
@@ -894,7 +894,7 @@ vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_tumu(
@@ -903,7 +903,7 @@ vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_tumu(
@@ -912,7 +912,7 @@ vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_tumu(
@@ -921,7 +921,7 @@ vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_tumu(
@@ -930,7 +930,7 @@ vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_tumu(
@@ -939,7 +939,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_tumu(
@@ -948,7 +948,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_tumu(
@@ -957,7 +957,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_tumu(
@@ -966,7 +966,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_tumu(
@@ -975,7 +975,7 @@ vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_tumu(
@@ -984,7 +984,7 @@ vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_tumu(
@@ -993,7 +993,7 @@ vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_tumu(
@@ -1002,7 +1002,7 @@ vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_tumu(
@@ -1011,7 +1011,7 @@ vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_tumu(
@@ -1020,7 +1020,7 @@ vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_tumu(
@@ -1029,7 +1029,7 @@ vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tumu(
@@ -1038,7 +1038,7 @@ vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tumu(
@@ -1047,7 +1047,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_tumu(
@@ -1056,7 +1056,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_tumu(
@@ -1065,7 +1065,7 @@ vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_tumu(
@@ -1074,7 +1074,7 @@ vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_tumu(
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_tumu(
@@ -1092,7 +1092,7 @@ vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_tumu(
@@ -1101,7 +1101,7 @@ vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_tumu(
@@ -1110,7 +1110,7 @@ vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_tumu(
@@ -1119,7 +1119,7 @@ vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_tumu(
@@ -1128,7 +1128,7 @@ vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_tumu(
@@ -1137,7 +1137,7 @@ vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_tumu(
@@ -1146,7 +1146,7 @@ vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_tumu(
@@ -1155,7 +1155,7 @@ vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_tumu(
@@ -1164,7 +1164,7 @@ vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_tumu(
@@ -1173,7 +1173,7 @@ vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_tumu(
@@ -1182,7 +1182,7 @@ vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_tumu(
@@ -1191,7 +1191,7 @@ vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vssubu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_mu(
@@ -1200,7 +1200,7 @@ vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_mu(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_mu(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_mu(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_mu(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_mu(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_mu(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_mu(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_mu(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_mu(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_mu(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_mu(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_mu(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_mu(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_mu(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_mu(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_mu(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_mu(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_mu(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_mu(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_mu(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_mu(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_mu(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_mu(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_mu(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_mu(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_mu(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_mu(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_mu(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_mu(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_mu(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_mu(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_mu(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_mu(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_mu(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_mu(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_mu(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_mu(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_mu(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_mu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_mu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_mu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_mu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_mu(
@@ -1587,6 +1587,6 @@ vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vssubu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsub.c
index ad725ac9798f..62119568a741 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vsub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vsub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vsub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_tu(
@@ -408,7 +408,7 @@ vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_tu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_tu(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_tu(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_tu(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_tu(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_tu(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_tu(
@@ -471,7 +471,7 @@ vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_tu(
@@ -480,7 +480,7 @@ vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_tu(
@@ -489,7 +489,7 @@ vuint8m2_t test_vsub_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_tu(
@@ -498,7 +498,7 @@ vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_tu(
@@ -507,7 +507,7 @@ vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_tu(
@@ -516,7 +516,7 @@ vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_tu(
@@ -525,7 +525,7 @@ vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_tu(
@@ -534,7 +534,7 @@ vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_tu(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_tu(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_tu(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_tu(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_tu(
@@ -579,7 +579,7 @@ vuint16m1_t test_vsub_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_tu(
@@ -588,7 +588,7 @@ vuint16m1_t test_vsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_tu(
@@ -597,7 +597,7 @@ vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_tu(
@@ -606,7 +606,7 @@ vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_tu(
@@ -615,7 +615,7 @@ vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_tu(
@@ -624,7 +624,7 @@ vuint16m4_t test_vsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_tu(
@@ -633,7 +633,7 @@ vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tu(
@@ -642,7 +642,7 @@ vuint16m8_t test_vsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tu(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_tu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_tu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_tu(
@@ -678,7 +678,7 @@ vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_tu(
@@ -687,7 +687,7 @@ vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_tu(
@@ -696,7 +696,7 @@ vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_tu(
@@ -705,7 +705,7 @@ vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_tu(
@@ -714,7 +714,7 @@ vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_tu(
@@ -723,7 +723,7 @@ vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_tu(
@@ -732,7 +732,7 @@ vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_tu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_tu(
@@ -750,7 +750,7 @@ vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_tu(
@@ -759,7 +759,7 @@ vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_tu(
@@ -768,7 +768,7 @@ vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_tu(
@@ -777,7 +777,7 @@ vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_tu(
@@ -786,7 +786,7 @@ vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_tu(
@@ -795,7 +795,7 @@ vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsub_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsub_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_tum(
@@ -804,7 +804,7 @@ vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_tum(
@@ -813,7 +813,7 @@ vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_tum(
@@ -822,7 +822,7 @@ vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_tum(
@@ -831,7 +831,7 @@ vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_tum(
@@ -840,7 +840,7 @@ vint8mf4_t test_vsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_tum(
@@ -849,7 +849,7 @@ vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_tum(
@@ -858,7 +858,7 @@ vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_tum(
@@ -867,7 +867,7 @@ vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_tum(
@@ -876,7 +876,7 @@ vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_tum(
@@ -885,7 +885,7 @@ vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_tum(
@@ -894,7 +894,7 @@ vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_tum(
@@ -903,7 +903,7 @@ vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_tum(
@@ -912,7 +912,7 @@ vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_tum(
@@ -921,7 +921,7 @@ vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_tum(
@@ -930,7 +930,7 @@ vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_tum(
@@ -939,7 +939,7 @@ vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_tum(
@@ -948,7 +948,7 @@ vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_tum(
@@ -957,7 +957,7 @@ vint16mf2_t test_vsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_tum(
@@ -966,7 +966,7 @@ vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_tum(
@@ -975,7 +975,7 @@ vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_tum(
@@ -984,7 +984,7 @@ vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_tum(
@@ -993,7 +993,7 @@ vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_tum(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_tum(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_tum(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_tum(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tum(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tum(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_tum(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_tum(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_tum(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_tum(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_tum(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_tum(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_tum(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_tum(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_tum(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_tum(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_tum(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_tum(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_tum(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_tum(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_tum(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_tum(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_tum(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_tum(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_tum(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_tum(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_tum(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_tum(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_tum(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_tum(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_tum(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_tum(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_tum(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_tum(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_tum(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_tum(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_tum(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_tum(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_tum(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_tum(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_tum(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_tum(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_tum(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_tum(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_tum(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_tum(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_tum(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_tum(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tum(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tum(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_tum(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_tum(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_tum(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_tum(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_tum(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_tum(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_tum(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_tum(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_tum(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_tum(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_tum(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_tum(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_tum(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_tum(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_tum(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_tum(
@@ -1587,7 +1587,7 @@ vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsub_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_tumu(
@@ -1596,7 +1596,7 @@ vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_tumu(
@@ -1605,7 +1605,7 @@ vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_tumu(
@@ -1614,7 +1614,7 @@ vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_tumu(
@@ -1623,7 +1623,7 @@ vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_tumu(
@@ -1632,7 +1632,7 @@ vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_tumu(
@@ -1641,7 +1641,7 @@ vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_tumu(
@@ -1650,7 +1650,7 @@ vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_tumu(
@@ -1659,7 +1659,7 @@ vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_tumu(
@@ -1668,7 +1668,7 @@ vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_tumu(
@@ -1677,7 +1677,7 @@ vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_tumu(
@@ -1686,7 +1686,7 @@ vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_tumu(
@@ -1695,7 +1695,7 @@ vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_tumu(
@@ -1704,7 +1704,7 @@ vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_tumu(
@@ -1713,7 +1713,7 @@ vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_tumu(
@@ -1722,7 +1722,7 @@ vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_tumu(
@@ -1731,7 +1731,7 @@ vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_tumu(
@@ -1740,7 +1740,7 @@ vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_tumu(
@@ -1749,7 +1749,7 @@ vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_tumu(
@@ -1758,7 +1758,7 @@ vint16mf2_t test_vsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_tumu(
@@ -1767,7 +1767,7 @@ vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_tumu(
@@ -1776,7 +1776,7 @@ vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_tumu(
@@ -1785,7 +1785,7 @@ vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_tumu(
@@ -1794,7 +1794,7 @@ vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_tumu(
@@ -1803,7 +1803,7 @@ vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_tumu(
@@ -1812,7 +1812,7 @@ vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_tumu(
@@ -1821,7 +1821,7 @@ vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tumu(
@@ -1830,7 +1830,7 @@ vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tumu(
@@ -1839,7 +1839,7 @@ vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_tumu(
@@ -1848,7 +1848,7 @@ vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_tumu(
@@ -1857,7 +1857,7 @@ vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_tumu(
@@ -1866,7 +1866,7 @@ vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_tumu(
@@ -1875,7 +1875,7 @@ vint32m2_t test_vsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_tumu(
@@ -1884,7 +1884,7 @@ vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_tumu(
@@ -1893,7 +1893,7 @@ vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_tumu(
@@ -1902,7 +1902,7 @@ vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_tumu(
@@ -1911,7 +1911,7 @@ vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_tumu(
@@ -1920,7 +1920,7 @@ vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_tumu(
@@ -1929,7 +1929,7 @@ vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_tumu(
@@ -1938,7 +1938,7 @@ vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_tumu(
@@ -1947,7 +1947,7 @@ vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_tumu(
@@ -1956,7 +1956,7 @@ vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_tumu(
@@ -1965,7 +1965,7 @@ vint64m4_t test_vsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_tumu(
@@ -1974,7 +1974,7 @@ vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_tumu(
@@ -1983,7 +1983,7 @@ vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_tumu(
@@ -1992,7 +1992,7 @@ vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_tumu(
@@ -2001,7 +2001,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_tumu(
@@ -2010,7 +2010,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_tumu(
@@ -2019,7 +2019,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_tumu(
@@ -2028,7 +2028,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_tumu(
@@ -2037,7 +2037,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_tumu(
@@ -2046,7 +2046,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_tumu(
@@ -2055,7 +2055,7 @@ vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_tumu(
@@ -2064,7 +2064,7 @@ vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_tumu(
@@ -2073,7 +2073,7 @@ vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_tumu(
@@ -2082,7 +2082,7 @@ vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_tumu(
@@ -2091,7 +2091,7 @@ vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_tumu(
@@ -2100,7 +2100,7 @@ vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_tumu(
@@ -2109,7 +2109,7 @@ vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_tumu(
@@ -2118,7 +2118,7 @@ vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_tumu(
@@ -2127,7 +2127,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_tumu(
@@ -2136,7 +2136,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_tumu(
@@ -2145,7 +2145,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_tumu(
@@ -2154,7 +2154,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_tumu(
@@ -2163,7 +2163,7 @@ vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_tumu(
@@ -2172,7 +2172,7 @@ vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_tumu(
@@ -2181,7 +2181,7 @@ vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_tumu(
@@ -2190,7 +2190,7 @@ vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_tumu(
@@ -2199,7 +2199,7 @@ vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_tumu(
@@ -2208,7 +2208,7 @@ vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_tumu(
@@ -2217,7 +2217,7 @@ vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tumu(
@@ -2226,7 +2226,7 @@ vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tumu(
@@ -2235,7 +2235,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_tumu(
@@ -2244,7 +2244,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_tumu(
@@ -2253,7 +2253,7 @@ vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_tumu(
@@ -2262,7 +2262,7 @@ vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_tumu(
@@ -2271,7 +2271,7 @@ vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_tumu(
@@ -2280,7 +2280,7 @@ vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_tumu(
@@ -2289,7 +2289,7 @@ vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_tumu(
@@ -2298,7 +2298,7 @@ vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_tumu(
@@ -2307,7 +2307,7 @@ vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_tumu(
@@ -2316,7 +2316,7 @@ vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_tumu(
@@ -2325,7 +2325,7 @@ vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_tumu(
@@ -2334,7 +2334,7 @@ vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_tumu(
@@ -2343,7 +2343,7 @@ vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_tumu(
@@ -2352,7 +2352,7 @@ vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_tumu(
@@ -2361,7 +2361,7 @@ vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_tumu(
@@ -2370,7 +2370,7 @@ vuint64m4_t test_vsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_tumu(
@@ -2379,7 +2379,7 @@ vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsub_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_mu(
@@ -2388,7 +2388,7 @@ vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_mu(
@@ -2397,7 +2397,7 @@ vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_mu(
@@ -2406,7 +2406,7 @@ vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_mu(
@@ -2415,7 +2415,7 @@ vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_mu(
@@ -2424,7 +2424,7 @@ vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_mu(
@@ -2433,7 +2433,7 @@ vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_mu(
@@ -2442,7 +2442,7 @@ vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_mu(
@@ -2451,7 +2451,7 @@ vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_mu(
@@ -2460,7 +2460,7 @@ vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_mu(
@@ -2469,7 +2469,7 @@ vint8m2_t test_vsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_mu(
@@ -2478,7 +2478,7 @@ vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_mu(
@@ -2487,7 +2487,7 @@ vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_mu(
@@ -2496,7 +2496,7 @@ vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_mu(
@@ -2505,7 +2505,7 @@ vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_mu(
@@ -2514,7 +2514,7 @@ vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_mu(
@@ -2523,7 +2523,7 @@ vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_mu(
@@ -2532,7 +2532,7 @@ vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_mu(
@@ -2541,7 +2541,7 @@ vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_mu(
@@ -2550,7 +2550,7 @@ vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_mu(
@@ -2559,7 +2559,7 @@ vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_mu(
@@ -2568,7 +2568,7 @@ vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_mu(
@@ -2577,7 +2577,7 @@ vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_mu(
@@ -2586,7 +2586,7 @@ vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_mu(
@@ -2595,7 +2595,7 @@ vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_mu(
@@ -2604,7 +2604,7 @@ vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_mu(
@@ -2613,7 +2613,7 @@ vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_mu(
@@ -2622,7 +2622,7 @@ vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_mu(
@@ -2631,7 +2631,7 @@ vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_mu(
@@ -2640,7 +2640,7 @@ vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_mu(
@@ -2649,7 +2649,7 @@ vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_mu(
@@ -2658,7 +2658,7 @@ vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_mu(
@@ -2667,7 +2667,7 @@ vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_mu(
@@ -2676,7 +2676,7 @@ vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_mu(
@@ -2685,7 +2685,7 @@ vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_mu(
@@ -2694,7 +2694,7 @@ vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_mu(
@@ -2703,7 +2703,7 @@ vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_mu(
@@ -2712,7 +2712,7 @@ vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_mu(
@@ -2721,7 +2721,7 @@ vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_mu(
@@ -2730,7 +2730,7 @@ vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_mu(
@@ -2739,7 +2739,7 @@ vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_mu(
@@ -2748,7 +2748,7 @@ vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_mu(
@@ -2757,7 +2757,7 @@ vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_mu(
@@ -2766,7 +2766,7 @@ vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_mu(
@@ -2775,7 +2775,7 @@ vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_mu(
@@ -2784,7 +2784,7 @@ vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_mu(
@@ -2793,7 +2793,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_mu(
@@ -2802,7 +2802,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_mu(
@@ -2811,7 +2811,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_mu(
@@ -2820,7 +2820,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_mu(
@@ -2829,7 +2829,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_mu(
@@ -2838,7 +2838,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_mu(
@@ -2847,7 +2847,7 @@ vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_mu(
@@ -2856,7 +2856,7 @@ vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_mu(
@@ -2865,7 +2865,7 @@ vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_mu(
@@ -2874,7 +2874,7 @@ vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_mu(
@@ -2883,7 +2883,7 @@ vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_mu(
@@ -2892,7 +2892,7 @@ vuint8m4_t test_vsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_mu(
@@ -2901,7 +2901,7 @@ vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_mu(
@@ -2910,7 +2910,7 @@ vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_mu(
@@ -2919,7 +2919,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_mu(
@@ -2928,7 +2928,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_mu(
@@ -2937,7 +2937,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_mu(
@@ -2946,7 +2946,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_mu(
@@ -2955,7 +2955,7 @@ vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_mu(
@@ -2964,7 +2964,7 @@ vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_mu(
@@ -2973,7 +2973,7 @@ vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_mu(
@@ -2982,7 +2982,7 @@ vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_mu(
@@ -2991,7 +2991,7 @@ vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_mu(
@@ -3000,7 +3000,7 @@ vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_mu(
@@ -3009,7 +3009,7 @@ vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_mu(
@@ -3018,7 +3018,7 @@ vuint16m8_t test_vsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_mu(
@@ -3027,7 +3027,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_mu(
@@ -3036,7 +3036,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_mu(
@@ -3045,7 +3045,7 @@ vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_mu(
@@ -3054,7 +3054,7 @@ vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_mu(
@@ -3063,7 +3063,7 @@ vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_mu(
@@ -3072,7 +3072,7 @@ vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_mu(
@@ -3081,7 +3081,7 @@ vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_mu(
@@ -3090,7 +3090,7 @@ vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_mu(
@@ -3099,7 +3099,7 @@ vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_mu(
@@ -3108,7 +3108,7 @@ vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_mu(
@@ -3117,7 +3117,7 @@ vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_mu(
@@ -3126,7 +3126,7 @@ vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_mu(
@@ -3135,7 +3135,7 @@ vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_mu(
@@ -3144,7 +3144,7 @@ vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_mu(
@@ -3153,7 +3153,7 @@ vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_mu(
@@ -3162,7 +3162,7 @@ vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_mu(
@@ -3171,6 +3171,6 @@ vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsub_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}
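// Editor's note (a hedged sketch, not part of the autogenerated test file):
// the change above is purely mechanical -- each overloaded policy intrinsic
// such as vsub_mu or vsub_tumu keeps its argument list and only gains the
// __riscv_ prefix. A minimal standalone caller is shown below; it assumes
// <riscv_vector.h> and a compiler with the RVV intrinsics enabled, and the
// wrapper name sub_i32m1_mu is hypothetical.
#include <riscv_vector.h>

vint32m1_t sub_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff,
                        vint32m1_t op1, vint32m1_t op2, size_t vl) {
  // Before this patch: return vsub_mu(mask, maskedoff, op1, op2, vl);
  // With the "mu" (mask-undisturbed) policy, elements where mask is 0
  // keep the corresponding value from maskedoff.
  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
}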
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwadd.c
index c8badf28ef70..c43a1b763506 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwadd.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_tu(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_tu(
@@ -30,7 +30,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_tu(
@@ -39,7 +39,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_tu(
@@ -48,7 +48,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_tu(
@@ -57,7 +57,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_tu(
@@ -66,7 +66,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_tu(
@@ -75,7 +75,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_tu(
@@ -84,7 +84,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_tu(
@@ -93,7 +93,7 @@ vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_tu(
@@ -111,7 +111,7 @@ vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_tu(
@@ -120,7 +120,7 @@ vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_tu(
@@ -129,7 +129,7 @@ vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_tu(
@@ -138,7 +138,7 @@ vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_tu(
@@ -147,7 +147,7 @@ vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_tu(
@@ -156,7 +156,7 @@ vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_tu(
@@ -165,7 +165,7 @@ vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_tu(
@@ -174,7 +174,7 @@ vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_tu(
@@ -183,7 +183,7 @@ vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_tu(
@@ -192,7 +192,7 @@ vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_tu(
@@ -201,7 +201,7 @@ vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_tu(
@@ -210,7 +210,7 @@ vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_tu(
@@ -219,7 +219,7 @@ vint16m8_t test_vwadd_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_tu(
@@ -228,7 +228,7 @@ vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_tu(
@@ -237,7 +237,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_tu(
@@ -291,7 +291,7 @@ vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_tu(
@@ -300,7 +300,7 @@ vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_tu(
@@ -309,7 +309,7 @@ vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_tu(
@@ -318,7 +318,7 @@ vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_tu(
@@ -327,7 +327,7 @@ vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_tu(
@@ -336,7 +336,7 @@ vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_tu(
@@ -345,7 +345,7 @@ vint32m4_t test_vwadd_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_tu(
@@ -354,7 +354,7 @@ vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_tu(
@@ -363,7 +363,7 @@ vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_tu(
@@ -372,7 +372,7 @@ vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_tu(
@@ -381,7 +381,7 @@ vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_tu(
@@ -390,7 +390,7 @@ vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_tu(
@@ -399,7 +399,7 @@ vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tu(
@@ -408,7 +408,7 @@ vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tu(
@@ -417,7 +417,7 @@ vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tu(
@@ -426,7 +426,7 @@ vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tu(
@@ -435,7 +435,7 @@ vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_tu(
@@ -444,7 +444,7 @@ vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_tu(
@@ -453,7 +453,7 @@ vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_tu(
@@ -462,7 +462,7 @@ vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_tu(
@@ -471,7 +471,7 @@ vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_tu(
@@ -480,7 +480,7 @@ vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_tu(
@@ -489,7 +489,7 @@ vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_tu(
@@ -498,7 +498,7 @@ vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_tu(
@@ -507,7 +507,7 @@ vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_tu(
@@ -516,7 +516,7 @@ vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_tu(
@@ -525,7 +525,7 @@ vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_tu(
@@ -534,7 +534,7 @@ vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_tu(
@@ -543,7 +543,7 @@ vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_tum(
@@ -552,7 +552,7 @@ vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_tum(
@@ -561,7 +561,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_tum(
@@ -570,7 +570,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_tum(
@@ -579,7 +579,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_tum(
@@ -588,7 +588,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_tum(
@@ -597,7 +597,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_tum(
@@ -606,7 +606,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_tum(
@@ -615,7 +615,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_tum(
@@ -624,7 +624,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_tum(
@@ -633,7 +633,7 @@ vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_tum(
@@ -642,7 +642,7 @@ vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_tum(
@@ -651,7 +651,7 @@ vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_tum(
@@ -660,7 +660,7 @@ vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_tum(
@@ -669,7 +669,7 @@ vint16m2_t test_vwadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_tum(
@@ -678,7 +678,7 @@ vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_tum(
@@ -687,7 +687,7 @@ vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_tum(
@@ -696,7 +696,7 @@ vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_tum(
@@ -705,7 +705,7 @@ vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_tum(
@@ -714,7 +714,7 @@ vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_tum(
@@ -723,7 +723,7 @@ vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_tum(
@@ -732,7 +732,7 @@ vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_tum(
@@ -741,7 +741,7 @@ vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_tum(
@@ -750,7 +750,7 @@ vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_tum(
@@ -759,7 +759,7 @@ vint16m8_t test_vwadd_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_tum(
@@ -768,7 +768,7 @@ vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_tum(
@@ -777,7 +777,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_tum(
@@ -786,7 +786,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_tum(
@@ -795,7 +795,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_tum(
@@ -804,7 +804,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_tum(
@@ -813,7 +813,7 @@ vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_tum(
@@ -822,7 +822,7 @@ vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_tum(
@@ -831,7 +831,7 @@ vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_tum(
@@ -840,7 +840,7 @@ vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_tum(
@@ -849,7 +849,7 @@ vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_tum(
@@ -858,7 +858,7 @@ vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_tum(
@@ -867,7 +867,7 @@ vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_tum(
@@ -876,7 +876,7 @@ vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_tum(
@@ -885,7 +885,7 @@ vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_tum(
@@ -894,7 +894,7 @@ vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_tum(
@@ -903,7 +903,7 @@ vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_tum(
@@ -912,7 +912,7 @@ vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_tum(
@@ -921,7 +921,7 @@ vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_tum(
@@ -930,7 +930,7 @@ vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_tum(
@@ -939,7 +939,7 @@ vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tum(
@@ -948,7 +948,7 @@ vint32m8_t test_vwadd_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tum(
@@ -957,7 +957,7 @@ vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tum(
@@ -966,7 +966,7 @@ vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tum(
@@ -975,7 +975,7 @@ vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_tum(
@@ -984,7 +984,7 @@ vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_tum(
@@ -993,7 +993,7 @@ vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_tum(
@@ -1002,7 +1002,7 @@ vint64m2_t test_vwadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_tum(
@@ -1011,7 +1011,7 @@ vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_tum(
@@ -1020,7 +1020,7 @@ vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_tum(
@@ -1029,7 +1029,7 @@ vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_tum(
@@ -1038,7 +1038,7 @@ vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_tum(
@@ -1047,7 +1047,7 @@ vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_tum(
@@ -1056,7 +1056,7 @@ vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_tum(
@@ -1065,7 +1065,7 @@ vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_tum(
@@ -1074,7 +1074,7 @@ vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_tum(
@@ -1083,7 +1083,7 @@ vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_tumu(
@@ -1092,7 +1092,7 @@ vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_tumu(
@@ -1101,7 +1101,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_tumu(
@@ -1110,7 +1110,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_tumu(
@@ -1119,7 +1119,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_tumu(
@@ -1128,7 +1128,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_tumu(
@@ -1137,7 +1137,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_tumu(
@@ -1146,7 +1146,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_tumu(
@@ -1155,7 +1155,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_tumu(
@@ -1164,7 +1164,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_tumu(
@@ -1173,7 +1173,7 @@ vint16m1_t test_vwadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_tumu(
@@ -1182,7 +1182,7 @@ vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_tumu(
@@ -1191,7 +1191,7 @@ vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_tumu(
@@ -1200,7 +1200,7 @@ vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_tumu(
@@ -1209,7 +1209,7 @@ vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_tumu(
@@ -1218,7 +1218,7 @@ vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_tumu(
@@ -1227,7 +1227,7 @@ vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_tumu(
@@ -1236,7 +1236,7 @@ vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_tumu(
@@ -1245,7 +1245,7 @@ vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_tumu(
@@ -1254,7 +1254,7 @@ vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_tumu(
@@ -1263,7 +1263,7 @@ vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_tumu(
@@ -1272,7 +1272,7 @@ vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_tumu(
@@ -1281,7 +1281,7 @@ vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_tumu(
@@ -1290,7 +1290,7 @@ vint16m8_t test_vwadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_tumu(
@@ -1299,7 +1299,7 @@ vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_tumu(
@@ -1308,7 +1308,7 @@ vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_tumu(
@@ -1317,7 +1317,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_tumu(
@@ -1326,7 +1326,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_tumu(
@@ -1335,7 +1335,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_tumu(
@@ -1344,7 +1344,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_tumu(
@@ -1353,7 +1353,7 @@ vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_tumu(
@@ -1362,7 +1362,7 @@ vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_tumu(
@@ -1371,7 +1371,7 @@ vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_tumu(
@@ -1380,7 +1380,7 @@ vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_tumu(
@@ -1389,7 +1389,7 @@ vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_tumu(
@@ -1398,7 +1398,7 @@ vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_tumu(
@@ -1407,7 +1407,7 @@ vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_tumu(
@@ -1416,7 +1416,7 @@ vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_tumu(
@@ -1425,7 +1425,7 @@ vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_tumu(
@@ -1434,7 +1434,7 @@ vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_tumu(
@@ -1443,7 +1443,7 @@ vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_tumu(
@@ -1452,7 +1452,7 @@ vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_tumu(
@@ -1461,7 +1461,7 @@ vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_tumu(
@@ -1470,7 +1470,7 @@ vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_tumu(
@@ -1479,7 +1479,7 @@ vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tumu(
@@ -1488,7 +1488,7 @@ vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tumu(
@@ -1497,7 +1497,7 @@ vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tumu(
@@ -1506,7 +1506,7 @@ vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tumu(
@@ -1515,7 +1515,7 @@ vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_tumu(
@@ -1524,7 +1524,7 @@ vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_tumu(
@@ -1533,7 +1533,7 @@ vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_tumu(
@@ -1542,7 +1542,7 @@ vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_tumu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_tumu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_tumu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_tumu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_tumu(
@@ -1587,7 +1587,7 @@ vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_tumu(
@@ -1596,7 +1596,7 @@ vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_tumu(
@@ -1605,7 +1605,7 @@ vint64m8_t test_vwadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_tumu(
@@ -1614,7 +1614,7 @@ vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_tumu(
@@ -1623,7 +1623,7 @@ vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_mu(
@@ -1632,7 +1632,7 @@ vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_mu(
@@ -1641,7 +1641,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_mu(
@@ -1650,7 +1650,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_mu(
@@ -1659,7 +1659,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_mu(
@@ -1668,7 +1668,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_mu(
@@ -1677,7 +1677,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_mu(
@@ -1686,7 +1686,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_mu(
@@ -1695,7 +1695,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_mu(
@@ -1704,7 +1704,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_mu(
@@ -1713,7 +1713,7 @@ vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_mu(
@@ -1722,7 +1722,7 @@ vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_mu(
@@ -1731,7 +1731,7 @@ vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwadd_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_mu(
@@ -1740,7 +1740,7 @@ vint16m1_t test_vwadd_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_mu(
@@ -1749,7 +1749,7 @@ vint16m2_t test_vwadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_mu(
@@ -1758,7 +1758,7 @@ vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_mu(
@@ -1767,7 +1767,7 @@ vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_mu(
@@ -1776,7 +1776,7 @@ vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_mu(
@@ -1785,7 +1785,7 @@ vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_mu(
@@ -1794,7 +1794,7 @@ vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_mu(
@@ -1803,7 +1803,7 @@ vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_mu(
@@ -1812,7 +1812,7 @@ vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_mu(
@@ -1821,7 +1821,7 @@ vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_mu(
@@ -1830,7 +1830,7 @@ vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_mu(
@@ -1839,7 +1839,7 @@ vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_mu(
@@ -1848,7 +1848,7 @@ vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_mu(
@@ -1857,7 +1857,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_mu(
@@ -1866,7 +1866,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_mu(
@@ -1875,7 +1875,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_mu(
@@ -1884,7 +1884,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_mu(
@@ -1893,7 +1893,7 @@ vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_mu(
@@ -1902,7 +1902,7 @@ vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_mu(
@@ -1911,7 +1911,7 @@ vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_mu(
@@ -1920,7 +1920,7 @@ vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_mu(
@@ -1929,7 +1929,7 @@ vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_mu(
@@ -1938,7 +1938,7 @@ vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_mu(
@@ -1947,7 +1947,7 @@ vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_mu(
@@ -1956,7 +1956,7 @@ vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_mu(
@@ -1965,7 +1965,7 @@ vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_mu(
@@ -1974,7 +1974,7 @@ vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_mu(
@@ -1983,7 +1983,7 @@ vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_mu(
@@ -1992,7 +1992,7 @@ vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_mu(
@@ -2001,7 +2001,7 @@ vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_mu(
@@ -2010,7 +2010,7 @@ vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_mu(
@@ -2019,7 +2019,7 @@ vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_mu(
@@ -2028,7 +2028,7 @@ vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_mu(
@@ -2037,7 +2037,7 @@ vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_mu(
@@ -2046,7 +2046,7 @@ vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_mu(
@@ -2055,7 +2055,7 @@ vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_mu(
@@ -2064,7 +2064,7 @@ vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_mu(
@@ -2073,7 +2073,7 @@ vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_mu(
@@ -2082,7 +2082,7 @@ vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_mu(
@@ -2091,7 +2091,7 @@ vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_mu(
@@ -2100,7 +2100,7 @@ vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_mu(
@@ -2109,7 +2109,7 @@ vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_mu(
@@ -2118,7 +2118,7 @@ vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_mu(
@@ -2127,7 +2127,7 @@ vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_mu(
@@ -2136,7 +2136,7 @@ vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_mu(
@@ -2145,7 +2145,7 @@ vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_mu(
@@ -2154,7 +2154,7 @@ vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_mu(
@@ -2163,6 +2163,6 @@ vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
}
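For reference, a minimal sketch of how one of the renamed overloaded intrinsics is called from user code, mirroring the tumu tests above (assumes a toolchain whose <riscv_vector.h> provides these overloads; the helper name widen_add_tumu is hypothetical):

    #include <stddef.h>
    #include <riscv_vector.h>

    // Widening add of two int8 vectors into an int16 result under the
    // tail-undisturbed, mask-undisturbed (tumu) policy, using the new
    // __riscv_-prefixed overloaded spelling exercised by the tests above.
    vint16m1_t widen_add_tumu(vbool16_t mask, vint16m1_t maskedoff,
                              vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
      return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
    }

The overload resolves on the argument types, so the same __riscv_vwadd_vv_tumu spelling covers every element width and LMUL combination in this file.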
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwaddu.c
index 366fb95a4551..e753c5538865 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwaddu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwaddu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_tu(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_tu(
@@ -30,7 +30,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_tu(
@@ -39,7 +39,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_tu(
@@ -48,7 +48,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_tu(
@@ -57,7 +57,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_tu(
@@ -66,7 +66,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_tu(
@@ -75,7 +75,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_tu(
@@ -84,7 +84,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_tu(
@@ -93,7 +93,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_tu(
@@ -102,7 +102,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_tu(
@@ -111,7 +111,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_tu(
@@ -120,7 +120,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_tu(
@@ -129,7 +129,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_tu(
@@ -138,7 +138,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_tu(
@@ -147,7 +147,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_tu(
@@ -156,7 +156,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_tu(
@@ -165,7 +165,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_tu(
@@ -174,7 +174,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_tu(
@@ -183,7 +183,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_tu(
@@ -192,7 +192,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_tu(
@@ -201,7 +201,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_tu(
@@ -210,7 +210,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_tu(
@@ -219,7 +219,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_tu(
@@ -228,7 +228,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_tu(
@@ -237,7 +237,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_tu(
@@ -291,7 +291,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_tu(
@@ -300,7 +300,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_tu(
@@ -309,7 +309,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_tu(
@@ -318,7 +318,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_tu(
@@ -327,7 +327,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_tu(
@@ -336,7 +336,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_tu(
@@ -345,7 +345,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_tu(
@@ -354,7 +354,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_tu(
@@ -363,7 +363,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_tu(
@@ -372,7 +372,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_tu(
@@ -381,7 +381,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_tu(
@@ -390,7 +390,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_tu(
@@ -399,7 +399,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tu(
@@ -408,7 +408,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tu(
@@ -417,7 +417,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tu(
@@ -426,7 +426,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tu(
@@ -435,7 +435,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_tu(
@@ -444,7 +444,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_tu(
@@ -453,7 +453,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_tu(
@@ -462,7 +462,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_tu(
@@ -471,7 +471,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_tu(
@@ -480,7 +480,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_tu(
@@ -489,7 +489,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_tu(
@@ -498,7 +498,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_tu(
@@ -507,7 +507,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_tu(
@@ -516,7 +516,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_tu(
@@ -525,7 +525,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_tu(
@@ -534,7 +534,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_tu(
@@ -543,7 +543,7 @@ vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_tum(
@@ -552,7 +552,7 @@ vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_tum(
@@ -561,7 +561,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_tum(
@@ -570,7 +570,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_tum(
@@ -579,7 +579,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_tum(
@@ -588,7 +588,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_tum(
@@ -597,7 +597,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_tum(
@@ -606,7 +606,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_tum(
@@ -615,7 +615,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_tum(
@@ -624,7 +624,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_tum(
@@ -633,7 +633,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_tum(
@@ -642,7 +642,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_tum(
@@ -651,7 +651,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_tum(
@@ -660,7 +660,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_tum(
@@ -669,7 +669,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_tum(
@@ -678,7 +678,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_tum(
@@ -687,7 +687,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_tum(
@@ -696,7 +696,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_tum(
@@ -705,7 +705,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_tum(
@@ -714,7 +714,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_tum(
@@ -723,7 +723,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_tum(
@@ -732,7 +732,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_tum(
@@ -741,7 +741,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_tum(
@@ -750,7 +750,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_tum(
@@ -759,7 +759,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_tum(
@@ -768,7 +768,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_tum(
@@ -777,7 +777,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_tum(
@@ -786,7 +786,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_tum(
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_tum(
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_tum(
@@ -813,7 +813,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_tum(
@@ -822,7 +822,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_tum(
@@ -831,7 +831,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_tum(
@@ -840,7 +840,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_tum(
@@ -849,7 +849,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_tum(
@@ -858,7 +858,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_tum(
@@ -867,7 +867,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_tum(
@@ -876,7 +876,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_tum(
@@ -885,7 +885,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_tum(
@@ -894,7 +894,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_tum(
@@ -903,7 +903,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_tum(
@@ -912,7 +912,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_tum(
@@ -921,7 +921,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_tum(
@@ -930,7 +930,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_tum(
@@ -939,7 +939,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tum(
@@ -948,7 +948,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tum(
@@ -957,7 +957,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tum(
@@ -966,7 +966,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tum(
@@ -975,7 +975,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_tum(
@@ -984,7 +984,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_tum(
@@ -993,7 +993,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_tum(
@@ -1002,7 +1002,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_tum(
@@ -1011,7 +1011,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_tum(
@@ -1020,7 +1020,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_tum(
@@ -1029,7 +1029,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_tum(
@@ -1038,7 +1038,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_tum(
@@ -1047,7 +1047,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_tum(
@@ -1056,7 +1056,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_tum(
@@ -1065,7 +1065,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_tum(
@@ -1074,7 +1074,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_tum(
@@ -1083,7 +1083,7 @@ vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_tumu(
@@ -1092,7 +1092,7 @@ vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_tumu(
@@ -1101,7 +1101,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_tumu(
@@ -1110,7 +1110,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_tumu(
@@ -1119,7 +1119,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_tumu(
@@ -1128,7 +1128,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_tumu(
@@ -1137,7 +1137,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_tumu(
@@ -1146,7 +1146,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_tumu(
@@ -1155,7 +1155,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_tumu(
@@ -1164,7 +1164,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_tumu(
@@ -1173,7 +1173,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_tumu(
@@ -1182,7 +1182,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_tumu(
@@ -1191,7 +1191,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_tumu(
@@ -1200,7 +1200,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_tumu(
@@ -1209,7 +1209,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_tumu(
@@ -1218,7 +1218,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_tumu(
@@ -1227,7 +1227,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_tumu(
@@ -1236,7 +1236,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_tumu(
@@ -1245,7 +1245,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_tumu(
@@ -1254,7 +1254,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_tumu(
@@ -1263,7 +1263,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_tumu(
@@ -1272,7 +1272,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_tumu(
@@ -1281,7 +1281,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_tumu(
@@ -1290,7 +1290,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_tumu(
@@ -1299,7 +1299,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_tumu(
@@ -1308,7 +1308,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_tumu(
@@ -1317,7 +1317,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_tumu(
@@ -1326,7 +1326,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_tumu(
@@ -1335,7 +1335,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_tumu(
@@ -1344,7 +1344,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_tumu(
@@ -1353,7 +1353,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_tumu(
@@ -1362,7 +1362,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_tumu(
@@ -1371,7 +1371,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_tumu(
@@ -1380,7 +1380,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_tumu(
@@ -1389,7 +1389,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_tumu(
@@ -1398,7 +1398,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_tumu(
@@ -1407,7 +1407,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_tumu(
@@ -1416,7 +1416,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_tumu(
@@ -1425,7 +1425,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_tumu(
@@ -1434,7 +1434,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_tumu(
@@ -1443,7 +1443,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_tumu(
@@ -1452,7 +1452,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_tumu(
@@ -1461,7 +1461,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_tumu(
@@ -1470,7 +1470,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_tumu(
@@ -1479,7 +1479,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tumu(
@@ -1488,7 +1488,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tumu(
@@ -1497,7 +1497,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tumu(
@@ -1506,7 +1506,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tumu(
@@ -1515,7 +1515,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_tumu(
@@ -1524,7 +1524,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_tumu(
@@ -1533,7 +1533,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_tumu(
@@ -1542,7 +1542,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_tumu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_tumu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_tumu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_tumu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_tumu(
@@ -1587,7 +1587,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_tumu(
@@ -1596,7 +1596,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_tumu(
@@ -1605,7 +1605,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_tumu(
@@ -1614,7 +1614,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_tumu(
@@ -1623,7 +1623,7 @@ vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_mu(
@@ -1632,7 +1632,7 @@ vuint64m8_t test_vwaddu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_mu(
@@ -1641,7 +1641,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_mu(
@@ -1650,7 +1650,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_mu(
@@ -1659,7 +1659,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_mu(
@@ -1668,7 +1668,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_mu(
@@ -1677,7 +1677,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_mu(
@@ -1686,7 +1686,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_mu(
@@ -1695,7 +1695,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_mu(
@@ -1704,7 +1704,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_mu(
@@ -1713,7 +1713,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_mu(
@@ -1722,7 +1722,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_mu(
@@ -1731,7 +1731,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_mu(
@@ -1740,7 +1740,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_mu(
@@ -1749,7 +1749,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_mu(
@@ -1758,7 +1758,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_mu(
@@ -1767,7 +1767,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_mu(
@@ -1776,7 +1776,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_mu(
@@ -1785,7 +1785,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_mu(
@@ -1794,7 +1794,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_mu(
@@ -1803,7 +1803,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_mu(
@@ -1812,7 +1812,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_mu(
@@ -1821,7 +1821,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_mu(
@@ -1830,7 +1830,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_mu(
@@ -1839,7 +1839,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_mu(
@@ -1848,7 +1848,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_mu(
@@ -1857,7 +1857,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_mu(
@@ -1866,7 +1866,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_mu(
@@ -1875,7 +1875,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_mu(
@@ -1884,7 +1884,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_mu(
@@ -1893,7 +1893,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_mu(
@@ -1902,7 +1902,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_mu(
@@ -1911,7 +1911,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_mu(
@@ -1920,7 +1920,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_mu(
@@ -1929,7 +1929,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_mu(
@@ -1938,7 +1938,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_mu(
@@ -1947,7 +1947,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_mu(
@@ -1956,7 +1956,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_mu(
@@ -1965,7 +1965,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_mu(
@@ -1974,7 +1974,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_mu(
@@ -1983,7 +1983,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_mu(
@@ -1992,7 +1992,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_mu(
@@ -2001,7 +2001,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_mu(
@@ -2010,7 +2010,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_mu(
@@ -2019,7 +2019,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_mu(
@@ -2028,7 +2028,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_mu(
@@ -2037,7 +2037,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_mu(
@@ -2046,7 +2046,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_mu(
@@ -2055,7 +2055,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_mu(
@@ -2064,7 +2064,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_mu(
@@ -2073,7 +2073,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_mu(
@@ -2082,7 +2082,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_mu(
@@ -2091,7 +2091,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_mu(
@@ -2100,7 +2100,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_mu(
@@ -2109,7 +2109,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_mu(
@@ -2118,7 +2118,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_mu(
@@ -2127,7 +2127,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_mu(
@@ -2136,7 +2136,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_mu(
@@ -2145,7 +2145,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_mu(
@@ -2154,7 +2154,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_mu(
@@ -2163,6 +2163,6 @@ vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
}
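For reference, the renamed overloaded intrinsics above are drop-in replacements: only the __riscv_ prefix is added, while the argument lists and type deduction are unchanged. A minimal sketch of a caller (hypothetical code, not part of this patch; it mirrors the u32m1 tumu test case above and assumes a toolchain providing riscv_vector.h with the prefixed intrinsics):

    #include <riscv_vector.h>

    // Tail-undisturbed, mask-undisturbed widening unsigned add. The overload
    // resolves on the vuint16mf2_t operands to the u32m1 result type, exactly
    // as in the autogenerated test; only the intrinsic name gained __riscv_.
    vuint32m1_t widen_add_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff,
                                     vuint16mf2_t op1, vuint16mf2_t op2,
                                     size_t vl) {
      return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl);
    }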
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvt.c
index 7f211d92c939..e4e3ef4aaa3f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvt.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_tu(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t src, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_tu(
@@ -30,7 +30,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_tu(
@@ -39,7 +39,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2_tu(vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_tu(
@@ -48,7 +48,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_tu(vint16m2_t maskedoff, vint8m1_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4_tu(vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_tu(
@@ -57,7 +57,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_tu(vint16m4_t maskedoff, vint8m2_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8_tu(vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_tu(
@@ -66,7 +66,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_tu(vint16m8_t maskedoff, vint8m4_t src, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_tu(
@@ -75,7 +75,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_tu(
@@ -84,7 +84,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2_tu(vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_tu(
@@ -93,7 +93,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_tu(vint32m2_t maskedoff, vint16m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4_tu(vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_tu(
@@ -102,7 +102,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_tu(vint32m4_t maskedoff, vint16m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8_tu(vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tu(
@@ -111,7 +111,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_tu(vint32m8_t maskedoff, vint16m4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_tu(
@@ -120,7 +120,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t src, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2_tu(vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_tu(
@@ -129,7 +129,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_tu(vint64m2_t maskedoff, vint32m1_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4_tu(vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_tu(
@@ -138,7 +138,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_tu(vint64m4_t maskedoff, vint32m2_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8_tu(vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
- return vwcvt_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvt_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_tum(
@@ -147,7 +147,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_tu(vint64m8_t maskedoff, vint32m4_t src, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_tum(
@@ -156,7 +156,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_tum(
@@ -165,7 +165,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_tum(
@@ -174,7 +174,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_tum(
@@ -183,7 +183,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_tum(
@@ -192,7 +192,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_tum(
@@ -201,7 +201,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_tum(
@@ -210,7 +210,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_tum(
@@ -219,7 +219,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_tum(
@@ -228,7 +228,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_tum(
@@ -237,7 +237,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tum(
@@ -246,7 +246,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_tum(
@@ -255,7 +255,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_tum(
@@ -264,7 +264,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_tum(
@@ -273,7 +273,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
- return vwcvt_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_tumu(
@@ -282,7 +282,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_tumu(
@@ -291,7 +291,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_tumu(
@@ -300,7 +300,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_tumu(
@@ -309,7 +309,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_tumu(
@@ -318,7 +318,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_tumu(
@@ -327,7 +327,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_tumu(
@@ -336,7 +336,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_tumu(
@@ -345,7 +345,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_tumu(
@@ -354,7 +354,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_tumu(
@@ -363,7 +363,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_tumu(
@@ -372,7 +372,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tumu(
@@ -381,7 +381,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_tumu(
@@ -390,7 +390,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_tumu(
@@ -399,7 +399,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_tumu(
@@ -408,7 +408,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
- return vwcvt_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_mu(
@@ -417,7 +417,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_mu(
@@ -426,7 +426,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_mu(
@@ -435,7 +435,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_mu(
@@ -444,7 +444,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_mu(
@@ -453,7 +453,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_mu(
@@ -462,7 +462,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_mu(
@@ -471,7 +471,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_mu(
@@ -480,7 +480,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_mu(
@@ -489,7 +489,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_mu(
@@ -498,7 +498,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_mu(
@@ -507,7 +507,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_mu(
@@ -516,7 +516,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_mu(
@@ -525,7 +525,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_mu(
@@ -534,7 +534,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_mu(
@@ -543,6 +543,6 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
- return vwcvt_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvtu.c
index fd734e6f5d1c..6fec2aaf905c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvtu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwcvtu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_tu(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t src
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_tu(
@@ -30,7 +30,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t src
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_tu(
@@ -39,7 +39,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_tu(
@@ -48,7 +48,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t src, si
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_tu(
@@ -57,7 +57,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t src, si
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_tu(
@@ -66,7 +66,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t src, si
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_tu(
@@ -75,7 +75,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t sr
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_tu(
@@ -84,7 +84,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_tu(
@@ -93,7 +93,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_tu(
@@ -102,7 +102,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tu(
@@ -111,7 +111,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_tu(
@@ -120,7 +120,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t src,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_tu(
@@ -129,7 +129,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t src, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_tu(
@@ -138,7 +138,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t src, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return vwcvtu_x_tu(maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_tum(
@@ -147,7 +147,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t src, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_tum(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_tum(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_tum(
@@ -174,7 +174,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_tum(
@@ -183,7 +183,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_tum(
@@ -192,7 +192,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_tum(
@@ -201,7 +201,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_tum(
@@ -210,7 +210,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_tum(
@@ -219,7 +219,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_tum(
@@ -228,7 +228,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_tum(
@@ -237,7 +237,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tum(
@@ -246,7 +246,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_tum(
@@ -255,7 +255,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_tum(
@@ -264,7 +264,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_tum(
@@ -273,7 +273,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return vwcvtu_x_tum(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_tumu(
@@ -282,7 +282,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_tumu(
@@ -291,7 +291,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_tumu(
@@ -300,7 +300,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_tumu(
@@ -309,7 +309,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_tumu(
@@ -318,7 +318,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_tumu(
@@ -327,7 +327,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_tumu(
@@ -336,7 +336,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_tumu(
@@ -345,7 +345,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_tumu(
@@ -354,7 +354,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_tumu(
@@ -363,7 +363,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_tumu(
@@ -372,7 +372,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tumu(
@@ -381,7 +381,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_tumu(
@@ -390,7 +390,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_tumu(
@@ -399,7 +399,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_tumu(
@@ -408,7 +408,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return vwcvtu_x_tumu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_mu(
@@ -417,7 +417,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_mu(
@@ -426,7 +426,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_mu(
@@ -435,7 +435,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_mu(
@@ -444,7 +444,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_mu(
@@ -453,7 +453,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_mu(
@@ -462,7 +462,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_mu(
@@ -471,7 +471,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_mu(
@@ -480,7 +480,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_mu(
@@ -489,7 +489,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_mu(
@@ -498,7 +498,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_mu(
@@ -507,7 +507,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_mu(
@@ -516,7 +516,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_mu(
@@ -525,7 +525,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_mu(
@@ -534,7 +534,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_mu(
@@ -543,6 +543,6 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return vwcvtu_x_mu(mask, maskedoff, src, vl);
+ return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmacc.c
index a9efc88fbf15..fa7e86c91c4e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmacc.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_tu(
@@ -22,7 +22,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_tu(
@@ -31,7 +31,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_tu(
@@ -40,7 +40,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_tu(
@@ -49,7 +49,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_tu(
@@ -58,7 +58,7 @@ vint16m1_t test_vwmacc_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_tu(
@@ -67,7 +67,7 @@ vint16m1_t test_vwmacc_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_tu(
@@ -76,7 +76,7 @@ vint16m2_t test_vwmacc_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_tu(
@@ -85,7 +85,7 @@ vint16m2_t test_vwmacc_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_tu(
@@ -94,7 +94,7 @@ vint16m4_t test_vwmacc_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_tu(
@@ -103,7 +103,7 @@ vint16m4_t test_vwmacc_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_tu(
@@ -112,7 +112,7 @@ vint16m8_t test_vwmacc_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_tu(
@@ -121,7 +121,7 @@ vint16m8_t test_vwmacc_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_tu(
@@ -130,7 +130,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_tu(
@@ -139,7 +139,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_tu(
@@ -148,7 +148,7 @@ vint32m1_t test_vwmacc_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_tu(
@@ -157,7 +157,7 @@ vint32m1_t test_vwmacc_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_tu(
@@ -166,7 +166,7 @@ vint32m2_t test_vwmacc_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_tu(
@@ -175,7 +175,7 @@ vint32m2_t test_vwmacc_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_tu(
@@ -184,7 +184,7 @@ vint32m4_t test_vwmacc_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_tu(
@@ -193,7 +193,7 @@ vint32m4_t test_vwmacc_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_tu(
@@ -202,7 +202,7 @@ vint32m8_t test_vwmacc_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tu(
@@ -211,7 +211,7 @@ vint32m8_t test_vwmacc_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tu(
@@ -220,7 +220,7 @@ vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_tu(
@@ -229,7 +229,7 @@ vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_tu(
@@ -238,7 +238,7 @@ vint64m2_t test_vwmacc_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_tu(
@@ -247,7 +247,7 @@ vint64m2_t test_vwmacc_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_tu(
@@ -256,7 +256,7 @@ vint64m4_t test_vwmacc_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_tu(
@@ -265,7 +265,7 @@ vint64m4_t test_vwmacc_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vwmacc_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_tu(
@@ -274,7 +274,7 @@ vint64m8_t test_vwmacc_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmacc_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_tum(
@@ -283,7 +283,7 @@ vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_tum(
@@ -292,7 +292,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_tum(
@@ -301,7 +301,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_tum(
@@ -310,7 +310,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_tum(
@@ -319,7 +319,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_tum(
@@ -328,7 +328,7 @@ vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_tum(
@@ -337,7 +337,7 @@ vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_tum(
@@ -346,7 +346,7 @@ vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_tum(
@@ -355,7 +355,7 @@ vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_tum(
@@ -364,7 +364,7 @@ vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_tum(
@@ -373,7 +373,7 @@ vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_tum(
@@ -382,7 +382,7 @@ vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_tum(
@@ -391,7 +391,7 @@ vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_tum(
@@ -400,7 +400,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_tum(
@@ -409,7 +409,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_tum(
@@ -418,7 +418,7 @@ vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_tum(
@@ -427,7 +427,7 @@ vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_tum(
@@ -436,7 +436,7 @@ vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_tum(
@@ -445,7 +445,7 @@ vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_tum(
@@ -454,7 +454,7 @@ vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_tum(
@@ -463,7 +463,7 @@ vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_tum(
@@ -472,7 +472,7 @@ vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tum(
@@ -481,7 +481,7 @@ vint32m8_t test_vwmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tum(
@@ -490,7 +490,7 @@ vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_tum(
@@ -499,7 +499,7 @@ vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_tum(
@@ -508,7 +508,7 @@ vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_tum(
@@ -517,7 +517,7 @@ vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_tum(
@@ -526,7 +526,7 @@ vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_tum(
@@ -535,7 +535,7 @@ vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_tum(
@@ -544,7 +544,7 @@ vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmacc_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_tumu(
@@ -553,7 +553,7 @@ vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_tumu(
@@ -562,7 +562,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_tumu(
@@ -571,7 +571,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_tumu(
@@ -580,7 +580,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_tumu(
@@ -589,7 +589,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_tumu(
@@ -598,7 +598,7 @@ vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_tumu(
@@ -607,7 +607,7 @@ vint16m1_t test_vwmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_tumu(
@@ -616,7 +616,7 @@ vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_tumu(
@@ -625,7 +625,7 @@ vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_tumu(
@@ -634,7 +634,7 @@ vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_tumu(
@@ -643,7 +643,7 @@ vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_tumu(
@@ -652,7 +652,7 @@ vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_tumu(
@@ -661,7 +661,7 @@ vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_tumu(
@@ -670,7 +670,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_tumu(
@@ -679,7 +679,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_tumu(
@@ -688,7 +688,7 @@ vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_tumu(
@@ -697,7 +697,7 @@ vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_tumu(
@@ -706,7 +706,7 @@ vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_tumu(
@@ -715,7 +715,7 @@ vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_tumu(
@@ -724,7 +724,7 @@ vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_tumu(
@@ -733,7 +733,7 @@ vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_tumu(
@@ -742,7 +742,7 @@ vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tumu(
@@ -751,7 +751,7 @@ vint32m8_t test_vwmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tumu(
@@ -760,7 +760,7 @@ vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_tumu(
@@ -769,7 +769,7 @@ vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_tumu(
@@ -778,7 +778,7 @@ vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_tumu(
@@ -787,7 +787,7 @@ vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_tumu(
@@ -796,7 +796,7 @@ vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_tumu(
@@ -805,7 +805,7 @@ vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_tumu(
@@ -814,7 +814,7 @@ vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmacc_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_mu(
@@ -823,7 +823,7 @@ vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_mu(
@@ -832,7 +832,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_mu(
@@ -841,7 +841,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_mu(
@@ -850,7 +850,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_mu(
@@ -859,7 +859,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_mu(
@@ -868,7 +868,7 @@ vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_mu(
@@ -877,7 +877,7 @@ vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_mu(
@@ -886,7 +886,7 @@ vint16m2_t test_vwmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_mu(
@@ -895,7 +895,7 @@ vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_mu(
@@ -904,7 +904,7 @@ vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_mu(
@@ -913,7 +913,7 @@ vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_mu(
@@ -922,7 +922,7 @@ vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_mu(
@@ -931,7 +931,7 @@ vint16m8_t test_vwmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_mu(
@@ -940,7 +940,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_mu(
@@ -949,7 +949,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_mu(
@@ -958,7 +958,7 @@ vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_mu(
@@ -967,7 +967,7 @@ vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_mu(
@@ -976,7 +976,7 @@ vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_mu(
@@ -985,7 +985,7 @@ vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_mu(
@@ -994,7 +994,7 @@ vint32m4_t test_vwmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_mu(
@@ -1003,7 +1003,7 @@ vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_mu(
@@ -1012,7 +1012,7 @@ vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_mu(
@@ -1021,7 +1021,7 @@ vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_mu(
@@ -1030,7 +1030,7 @@ vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_mu(
@@ -1039,7 +1039,7 @@ vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_mu(
@@ -1048,7 +1048,7 @@ vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_mu(
@@ -1057,7 +1057,7 @@ vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_mu(
@@ -1066,7 +1066,7 @@ vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_mu(
@@ -1075,7 +1075,7 @@ vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_mu(
@@ -1084,6 +1084,6 @@ vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmacc_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl);
}
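For reference, a minimal standalone sketch (not part of the patch) of how caller code adopts the renamed overloaded intrinsic. The call signature matches the tests above; it assumes a compiler with the RVV `v` extension enabled and <riscv_vector.h> exposing the overloaded API.

#include <riscv_vector.h>

// Masked widening multiply-accumulate with the tail-undisturbed /
// mask-undisturbed (tumu) policy: acc[i] += (widened) a[i] * b[i] for the
// first vl elements where mask[i] is set; all other elements keep their
// previous contents.
vint16mf4_t widen_mac_tumu(vbool64_t mask, vint16mf4_t acc,
                           vint8mf8_t a, vint8mf8_t b, size_t vl) {
  // Previously spelled vwmacc_tumu(...); this patch adds the __riscv_ prefix.
  return __riscv_vwmacc_tumu(mask, acc, a, b, vl);
}
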
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccsu.c
index 3b7e0f3ff8c0..67601a145dfc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccsu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccsu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_tu(
@@ -22,7 +22,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_tu(
@@ -31,7 +31,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vuint8mf8_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_tu(
@@ -40,7 +40,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_tu(
@@ -49,7 +49,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vuint8mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_tu(
@@ -58,7 +58,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_tu(
@@ -67,7 +67,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_tu(
@@ -76,7 +76,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_tu(
@@ -85,7 +85,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_tu(
@@ -94,7 +94,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_tu(
@@ -103,7 +103,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_tu(
@@ -112,7 +112,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_tu(
@@ -121,7 +121,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_tu(
@@ -130,7 +130,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_tu(
@@ -139,7 +139,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vuint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_tu(
@@ -148,7 +148,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_tu(
@@ -157,7 +157,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_tu(
@@ -166,7 +166,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_tu(
@@ -175,7 +175,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_tu(
@@ -184,7 +184,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_tu(
@@ -193,7 +193,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_tu(
@@ -202,7 +202,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tu(
@@ -211,7 +211,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tu(
@@ -220,7 +220,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_tu(
@@ -229,7 +229,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_tu(
@@ -238,7 +238,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_tu(
@@ -247,7 +247,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_tu(
@@ -256,7 +256,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_tu(
@@ -265,7 +265,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_tu(
@@ -274,7 +274,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl);
}
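As a hedged illustration (not part of the patch), the unmasked tail-undisturbed form of the signed-by-unsigned variant after the rename; the argument types follow the _tu tests above.

#include <riscv_vector.h>

// Widening signed-by-unsigned multiply-accumulate, tail-undisturbed (tu)
// policy: acc[i] += (widened) sa[i] * ub[i] for the first vl elements, with
// sa treated as signed and ub as unsigned; tail elements keep their
// previous contents.
vint16mf4_t widen_mac_su_tu(vint16mf4_t acc, vint8mf8_t sa,
                            vuint8mf8_t ub, size_t vl) {
  // Previously spelled vwmaccsu_tu(...); now carries the __riscv_ prefix.
  return __riscv_vwmaccsu_tu(acc, sa, ub, vl);
}
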
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_tum(
@@ -283,7 +283,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_tum(
@@ -292,7 +292,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_tum(
@@ -301,7 +301,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_tum(
@@ -310,7 +310,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_tum(
@@ -319,7 +319,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_tum(
@@ -328,7 +328,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_tum(
@@ -337,7 +337,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_tum(
@@ -346,7 +346,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_tum(
@@ -355,7 +355,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_tum(
@@ -364,7 +364,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_tum(
@@ -373,7 +373,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_tum(
@@ -382,7 +382,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_tum(
@@ -391,7 +391,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_tum(
@@ -400,7 +400,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_tum(
@@ -409,7 +409,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_tum(
@@ -418,7 +418,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_tum(
@@ -427,7 +427,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_tum(
@@ -436,7 +436,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_tum(
@@ -445,7 +445,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_tum(
@@ -454,7 +454,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_tum(
@@ -463,7 +463,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_tum(
@@ -472,7 +472,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tum(
@@ -481,7 +481,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tum(
@@ -490,7 +490,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_tum(
@@ -499,7 +499,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_tum(
@@ -508,7 +508,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_tum(
@@ -517,7 +517,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_tum(
@@ -526,7 +526,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_tum(
@@ -535,7 +535,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_tum(
@@ -544,7 +544,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_tumu(
@@ -553,7 +553,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_tumu(
@@ -562,7 +562,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_tumu(
@@ -571,7 +571,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_tumu(
@@ -580,7 +580,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_tumu(
@@ -589,7 +589,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_tumu(
@@ -598,7 +598,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_tumu(
@@ -607,7 +607,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_tumu(
@@ -616,7 +616,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_tumu(
@@ -625,7 +625,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_tumu(
@@ -634,7 +634,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_tumu(
@@ -643,7 +643,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_tumu(
@@ -652,7 +652,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_tumu(
@@ -661,7 +661,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_tumu(
@@ -670,7 +670,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_tumu(
@@ -679,7 +679,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_tumu(
@@ -688,7 +688,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_tumu(
@@ -697,7 +697,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_tumu(
@@ -706,7 +706,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_tumu(
@@ -715,7 +715,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_tumu(
@@ -724,7 +724,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_tumu(
@@ -733,7 +733,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_tumu(
@@ -742,7 +742,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tumu(
@@ -751,7 +751,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tumu(
@@ -760,7 +760,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_tumu(
@@ -769,7 +769,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_tumu(
@@ -778,7 +778,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_tumu(
@@ -787,7 +787,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_tumu(
@@ -796,7 +796,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_tumu(
@@ -805,7 +805,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_tumu(
@@ -814,7 +814,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_mu(
@@ -823,7 +823,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_mu(
@@ -832,7 +832,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_mu(
@@ -841,7 +841,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_mu(
@@ -850,7 +850,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_mu(
@@ -859,7 +859,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_mu(
@@ -868,7 +868,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_mu(
@@ -877,7 +877,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_mu(
@@ -886,7 +886,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_mu(
@@ -895,7 +895,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_mu(
@@ -904,7 +904,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_mu(
@@ -913,7 +913,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_mu(
@@ -922,7 +922,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_mu(
@@ -931,7 +931,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_mu(
@@ -940,7 +940,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_mu(
@@ -949,7 +949,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_mu(
@@ -958,7 +958,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_mu(
@@ -967,7 +967,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_mu(
@@ -976,7 +976,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_mu(
@@ -985,7 +985,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_mu(
@@ -994,7 +994,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_mu(
@@ -1003,7 +1003,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_mu(
@@ -1012,7 +1012,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_mu(
@@ -1021,7 +1021,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_mu(
@@ -1030,7 +1030,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_mu(
@@ -1039,7 +1039,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_mu(
@@ -1048,7 +1048,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_mu(
@@ -1057,7 +1057,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_mu(
@@ -1066,7 +1066,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_mu(
@@ -1075,7 +1075,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_mu(
@@ -1084,6 +1084,6 @@ vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccsu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl);
}
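The hunks above apply one mechanical rename across the vwmaccsu policy tests: each overloaded call gains the __riscv_ prefix while the operand lists, the policy suffixes (_tu/_tum/_tumu/_mu), and the CHECK lines stay untouched. As a minimal caller sketch mirroring the i32m2 _tumu case above (the wrapper name mac_i32m2_tumu is illustrative only, not part of this patch; build against a V-extension target, e.g. -march=rv64gcv):

#include <riscv_vector.h>

// Overload resolution on the operand types selects the vwmaccsu.vv i32m2
// form; the _tumu suffix requests tail-undisturbed, mask-undisturbed policy.
vint32m2_t mac_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1,
                          vuint16m1_t vs2, size_t vl) {
  return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl);
}

Since this series replaces the unprefixed overloaded spellings rather than aliasing them (the tests here are rewritten, not duplicated), downstream code calling vwmaccsu_tumu and friends needs the same one-token rename.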
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccu.c
index 08d553a2e3e0..98ad9f48838f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccu.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_tu(
@@ -22,7 +22,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4_tu(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_tu(
@@ -31,7 +31,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_tu(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_tu(
@@ -40,7 +40,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2_tu(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_tu(
@@ -49,7 +49,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_tu(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_tu(
@@ -58,7 +58,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1_tu(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_tu(
@@ -67,7 +67,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_tu(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_tu(
@@ -76,7 +76,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2_tu(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_tu(
@@ -85,7 +85,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_tu(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_tu(
@@ -94,7 +94,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4_tu(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_tu(
@@ -103,7 +103,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_tu(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_tu(
@@ -112,7 +112,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8_tu(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_tu(
@@ -121,7 +121,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_tu(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_tu(
@@ -130,7 +130,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2_tu(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_tu(
@@ -139,7 +139,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_tu(vuint32mf2_t vd, uint16_t rs1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_tu(
@@ -148,7 +148,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1_tu(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_tu(
@@ -157,7 +157,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_tu(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_tu(
@@ -166,7 +166,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2_tu(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_tu(
@@ -175,7 +175,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_tu(vuint32m2_t vd, uint16_t rs1, vuint16m1_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_tu(
@@ -184,7 +184,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4_tu(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_tu(
@@ -193,7 +193,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_tu(vuint32m4_t vd, uint16_t rs1, vuint16m2_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_tu(
@@ -202,7 +202,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8_tu(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tu(
@@ -211,7 +211,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_tu(vuint32m8_t vd, uint16_t rs1, vuint16m4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tu(
@@ -220,7 +220,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_tu(
@@ -229,7 +229,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_tu(
@@ -238,7 +238,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2_tu(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_tu(
@@ -247,7 +247,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_tu(vuint64m2_t vd, uint32_t rs1, vuint32m1_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_tu(
@@ -256,7 +256,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4_tu(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_tu(
@@ -265,7 +265,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_tu(vuint64m4_t vd, uint32_t rs1, vuint32m2_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_tu(
@@ -274,7 +274,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_tum(
@@ -283,7 +283,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, vuint32m4_t v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_tum(
@@ -292,7 +292,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_tum(
@@ -301,7 +301,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_tum(
@@ -310,7 +310,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_tum(
@@ -319,7 +319,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_tum(
@@ -328,7 +328,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_tum(
@@ -337,7 +337,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_tum(
@@ -346,7 +346,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_tum(
@@ -355,7 +355,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_tum(
@@ -364,7 +364,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_tum(
@@ -373,7 +373,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_tum(
@@ -382,7 +382,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_tum(
@@ -391,7 +391,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_tum(
@@ -400,7 +400,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_tum(
@@ -409,7 +409,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint16_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_tum(
@@ -418,7 +418,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_tum(
@@ -427,7 +427,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_tum(
@@ -436,7 +436,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_tum(
@@ -445,7 +445,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_tum(
@@ -454,7 +454,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_tum(
@@ -463,7 +463,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_tum(
@@ -472,7 +472,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tum(
@@ -481,7 +481,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tum(
@@ -490,7 +490,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_tum(
@@ -499,7 +499,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_tum(
@@ -508,7 +508,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_tum(
@@ -517,7 +517,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_tum(
@@ -526,7 +526,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_tum(
@@ -535,7 +535,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_tum(
@@ -544,7 +544,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_tumu(
@@ -553,7 +553,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_tumu(
@@ -562,7 +562,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_tumu(
@@ -571,7 +571,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_tumu(
@@ -580,7 +580,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_tumu(
@@ -589,7 +589,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint8_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_tumu(
@@ -598,7 +598,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_tumu(
@@ -607,7 +607,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint8_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_tumu(
@@ -616,7 +616,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_tumu(
@@ -625,7 +625,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_tumu(
@@ -634,7 +634,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_tumu(
@@ -643,7 +643,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_tumu(
@@ -652,7 +652,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_tumu(
@@ -661,7 +661,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_tumu(
@@ -670,7 +670,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_tumu(
@@ -679,7 +679,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_tumu(
@@ -688,7 +688,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_tumu(
@@ -697,7 +697,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_tumu(
@@ -706,7 +706,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_tumu(
@@ -715,7 +715,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_tumu(
@@ -724,7 +724,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_tumu(
@@ -733,7 +733,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_tumu(
@@ -742,7 +742,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tumu(
@@ -751,7 +751,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tumu(
@@ -760,7 +760,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_tumu(
@@ -769,7 +769,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_tumu(
@@ -778,7 +778,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_tumu(
@@ -787,7 +787,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_tumu(
@@ -796,7 +796,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_tumu(
@@ -805,7 +805,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_tumu(
@@ -814,7 +814,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_mu(
@@ -823,7 +823,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_mu(
@@ -832,7 +832,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_mu(
@@ -841,7 +841,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_mu(
@@ -850,7 +850,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_mu(
@@ -859,7 +859,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_mu(
@@ -868,7 +868,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_mu(
@@ -877,7 +877,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_mu(
@@ -886,7 +886,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t v
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_mu(
@@ -895,7 +895,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_mu(
@@ -904,7 +904,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t v
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_mu(
@@ -913,7 +913,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_mu(
@@ -922,7 +922,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t v
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_mu(
@@ -931,7 +931,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_mu(
@@ -940,7 +940,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_mu(
@@ -949,7 +949,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_mu(
@@ -958,7 +958,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_mu(
@@ -967,7 +967,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_mu(
@@ -976,7 +976,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_mu(
@@ -985,7 +985,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_mu(
@@ -994,7 +994,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_mu(
@@ -1003,7 +1003,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_mu(
@@ -1012,7 +1012,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_mu(
@@ -1021,7 +1021,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_mu(
@@ -1030,7 +1030,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_mu(
@@ -1039,7 +1039,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_mu(
@@ -1048,7 +1048,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_mu(
@@ -1057,7 +1057,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_mu(
@@ -1066,7 +1066,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_mu(
@@ -1075,7 +1075,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, vs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_mu(
@@ -1084,6 +1084,6 @@ vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
- return vwmaccu_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccus.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccus.c
index bdc9b8aa62be..2d216a1a5bd8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccus.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccus.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4_tu(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_tu(
@@ -22,7 +22,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_tu(vint16mf4_t vd, uint8_t rs1, vint8mf8_t v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2_tu(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_tu(
@@ -31,7 +31,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_tu(vint16mf2_t vd, uint8_t rs1, vint8mf4_t v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1_tu(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_tu(
@@ -40,7 +40,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_tu(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2_tu(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_tu(
@@ -49,7 +49,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_tu(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4_tu(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_tu(
@@ -58,7 +58,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_tu(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8_tu(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_tu(
@@ -67,7 +67,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_tu(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2_tu(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_tu(
@@ -76,7 +76,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_tu(vint32mf2_t vd, uint16_t rs1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1_tu(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_tu(
@@ -85,7 +85,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_tu(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2_tu(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_tu(
@@ -94,7 +94,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_tu(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4_tu(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_tu(
@@ -103,7 +103,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_tu(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8_tu(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tu(
@@ -112,7 +112,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_tu(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_tu(
@@ -121,7 +121,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2_tu(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_tu(
@@ -130,7 +130,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_tu(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4_tu(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_tu(
@@ -139,7 +139,7 @@ vint64m4_t test_vwmaccus_vx_i64m4_tu(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmaccus_tu(vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_tum(
@@ -148,7 +148,7 @@ vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_tum(
@@ -157,7 +157,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_tum(
@@ -166,7 +166,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_tum(
@@ -175,7 +175,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_tum(
@@ -184,7 +184,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_tum(
@@ -193,7 +193,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_tum(
@@ -202,7 +202,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_tum(
@@ -211,7 +211,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_tum(
@@ -220,7 +220,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_tum(
@@ -229,7 +229,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_tum(
@@ -238,7 +238,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tum(
@@ -247,7 +247,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_tum(
@@ -256,7 +256,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_tum(
@@ -265,7 +265,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_tum(
@@ -274,7 +274,7 @@ vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmaccus_tum(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_tumu(
@@ -283,7 +283,7 @@ vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_tumu(
@@ -292,7 +292,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_tumu(
@@ -301,7 +301,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_tumu(
@@ -310,7 +310,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, uint8_t rs
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_tumu(
@@ -319,7 +319,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_tumu(
@@ -328,7 +328,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_tumu(
@@ -337,7 +337,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, uint8_t rs1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_tumu(
@@ -346,7 +346,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, uint16_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_tumu(
@@ -355,7 +355,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_tumu(
@@ -364,7 +364,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, uint16_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_tumu(
@@ -373,7 +373,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tumu(
@@ -382,7 +382,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, uint16_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_tumu(
@@ -391,7 +391,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_tumu(
@@ -400,7 +400,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_tumu(
@@ -409,7 +409,7 @@ vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, uint32_t r
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmaccus_tumu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_mu(
@@ -418,7 +418,7 @@ vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, uint32_t rs
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_mu(
@@ -427,7 +427,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, uint8_t r
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_mu(
@@ -436,7 +436,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, uint8_t r
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_mu(
@@ -445,7 +445,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_mu(
@@ -454,7 +454,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_mu(
@@ -463,7 +463,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_mu(
@@ -472,7 +472,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, uint8_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_mu(
@@ -481,7 +481,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_mu(
@@ -490,7 +490,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_mu(
@@ -499,7 +499,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, uint16_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_mu(
@@ -508,7 +508,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_mu(
@@ -517,7 +517,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, uint16_t rs1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_mu(
@@ -526,7 +526,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_mu(
@@ -535,7 +535,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_mu(
@@ -544,6 +544,6 @@ vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, uint32_t rs1
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
- return vwmaccus_mu(mask, vd, rs1, vs2, vl);
+ return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmul.c
index 7b12b8f7cb01..99c7fecdf64a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmul.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_tu(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_tu(
@@ -30,7 +30,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_tu(
@@ -39,7 +39,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_tu(
@@ -48,7 +48,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_tu(
@@ -57,7 +57,7 @@ vint16m1_t test_vwmul_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_tu(
@@ -66,7 +66,7 @@ vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_tu(
@@ -75,7 +75,7 @@ vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_tu(
@@ -84,7 +84,7 @@ vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_tu(
@@ -93,7 +93,7 @@ vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_tu(
@@ -102,7 +102,7 @@ vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_tu(
@@ -111,7 +111,7 @@ vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_tu(
@@ -120,7 +120,7 @@ vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_tu(
@@ -129,7 +129,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_tu(
@@ -138,7 +138,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_tu(
@@ -156,7 +156,7 @@ vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_tu(
@@ -165,7 +165,7 @@ vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_tu(
@@ -174,7 +174,7 @@ vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_tu(
@@ -183,7 +183,7 @@ vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_tu(
@@ -192,7 +192,7 @@ vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_tu(
@@ -201,7 +201,7 @@ vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tu(
@@ -210,7 +210,7 @@ vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tu(
@@ -219,7 +219,7 @@ vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_tu(
@@ -228,7 +228,7 @@ vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_tu(
@@ -237,7 +237,7 @@ vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_tu(
@@ -246,7 +246,7 @@ vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_tu(
@@ -255,7 +255,7 @@ vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_tu(
@@ -264,7 +264,7 @@ vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_tu(
@@ -273,7 +273,7 @@ vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_tum(
@@ -282,7 +282,7 @@ vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_tum(
@@ -291,7 +291,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_tum(
@@ -300,7 +300,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_tum(
@@ -309,7 +309,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_tum(
@@ -318,7 +318,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_tum(
@@ -327,7 +327,7 @@ vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_tum(
@@ -336,7 +336,7 @@ vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_tum(
@@ -345,7 +345,7 @@ vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_tum(
@@ -354,7 +354,7 @@ vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_tum(
@@ -363,7 +363,7 @@ vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_tum(
@@ -372,7 +372,7 @@ vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_tum(
@@ -381,7 +381,7 @@ vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_tum(
@@ -390,7 +390,7 @@ vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_tum(
@@ -399,7 +399,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_tum(
@@ -408,7 +408,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_tum(
@@ -417,7 +417,7 @@ vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_tum(
@@ -426,7 +426,7 @@ vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_tum(
@@ -435,7 +435,7 @@ vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_tum(
@@ -444,7 +444,7 @@ vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_tum(
@@ -453,7 +453,7 @@ vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_tum(
@@ -462,7 +462,7 @@ vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_tum(
@@ -471,7 +471,7 @@ vint32m8_t test_vwmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tum(
@@ -480,7 +480,7 @@ vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tum(
@@ -489,7 +489,7 @@ vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_tum(
@@ -498,7 +498,7 @@ vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_tum(
@@ -507,7 +507,7 @@ vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_tum(
@@ -516,7 +516,7 @@ vint64m2_t test_vwmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_tum(
@@ -525,7 +525,7 @@ vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_tum(
@@ -534,7 +534,7 @@ vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_tum(
@@ -543,7 +543,7 @@ vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_tumu(
@@ -552,7 +552,7 @@ vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_tumu(
@@ -561,7 +561,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_tumu(
@@ -570,7 +570,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_tumu(
@@ -579,7 +579,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_tumu(
@@ -588,7 +588,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_tumu(
@@ -597,7 +597,7 @@ vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_tumu(
@@ -606,7 +606,7 @@ vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_tumu(
@@ -615,7 +615,7 @@ vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_tumu(
@@ -624,7 +624,7 @@ vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_tumu(
@@ -633,7 +633,7 @@ vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_tumu(
@@ -642,7 +642,7 @@ vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_tumu(
@@ -651,7 +651,7 @@ vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_tumu(
@@ -660,7 +660,7 @@ vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_tumu(
@@ -669,7 +669,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_tumu(
@@ -678,7 +678,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_tumu(
@@ -687,7 +687,7 @@ vint32m1_t test_vwmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_tumu(
@@ -696,7 +696,7 @@ vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_tumu(
@@ -705,7 +705,7 @@ vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_tumu(
@@ -714,7 +714,7 @@ vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_tumu(
@@ -723,7 +723,7 @@ vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_tumu(
@@ -732,7 +732,7 @@ vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_tumu(
@@ -741,7 +741,7 @@ vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tumu(
@@ -750,7 +750,7 @@ vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tumu(
@@ -759,7 +759,7 @@ vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_tumu(
@@ -768,7 +768,7 @@ vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_tumu(
@@ -777,7 +777,7 @@ vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_tumu(
@@ -786,7 +786,7 @@ vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_tumu(
@@ -795,7 +795,7 @@ vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_tumu(
@@ -804,7 +804,7 @@ vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_tumu(
@@ -813,7 +813,7 @@ vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_mu(
@@ -822,7 +822,7 @@ vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_mu(
@@ -831,7 +831,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_mu(
@@ -840,7 +840,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_mu(
@@ -849,7 +849,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_mu(
@@ -858,7 +858,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_mu(
@@ -867,7 +867,7 @@ vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_mu(
@@ -876,7 +876,7 @@ vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_mu(
@@ -885,7 +885,7 @@ vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_mu(
@@ -894,7 +894,7 @@ vint16m2_t test_vwmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_mu(
@@ -903,7 +903,7 @@ vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_mu(
@@ -912,7 +912,7 @@ vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_mu(
@@ -921,7 +921,7 @@ vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_mu(
@@ -930,7 +930,7 @@ vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_mu(
@@ -939,7 +939,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_mu(
@@ -948,7 +948,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_mu(
@@ -957,7 +957,7 @@ vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_mu(
@@ -966,7 +966,7 @@ vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_mu(
@@ -975,7 +975,7 @@ vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_mu(
@@ -984,7 +984,7 @@ vint32m2_t test_vwmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_mu(
@@ -993,7 +993,7 @@ vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_mu(
@@ -1002,7 +1002,7 @@ vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_mu(
@@ -1011,7 +1011,7 @@ vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_mu(
@@ -1020,7 +1020,7 @@ vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_mu(
@@ -1029,7 +1029,7 @@ vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_mu(
@@ -1038,7 +1038,7 @@ vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_mu(
@@ -1047,7 +1047,7 @@ vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_mu(
@@ -1056,7 +1056,7 @@ vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_mu(
@@ -1065,7 +1065,7 @@ vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_mu(
@@ -1074,7 +1074,7 @@ vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_mu(
@@ -1083,6 +1083,6 @@ vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl);
}
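The vwmulsu.c test diff below applies the same mechanical rewrite to the signed-unsigned widening multiply overloads. For reference, a minimal user-side sketch of the migration this patch implies (the helper name widen_mul is illustrative only; the types and the overloaded __riscv_vwmul_tum call are taken directly from the tests above):

#include <riscv_vector.h>

// Minimal sketch, assuming a caller of the masked, tail-undisturbed
// (_tum) overloaded widening multiply: after this patch the overloaded
// spelling carries the __riscv_ prefix, matching the riscv-c-api-doc
// naming guideline. Argument order and types are unchanged.
vint16m1_t widen_mul(vbool16_t mask, vint16m1_t maskedoff,
                     vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  // was: return vwmul_tum(mask, maskedoff, op1, op2, vl);
  return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl);
}
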
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulsu.c
index 29b3c13fdb73..47c45aad23d8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulsu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulsu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_tu(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_tu(
@@ -30,7 +30,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_tu(
@@ -39,7 +39,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_tu(
@@ -48,7 +48,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_tu(
@@ -57,7 +57,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_tu(
@@ -66,7 +66,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_tu(
@@ -75,7 +75,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_tu(
@@ -84,7 +84,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_tu(
@@ -93,7 +93,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_tu(
@@ -102,7 +102,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_tu(
@@ -111,7 +111,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_tu(
@@ -120,7 +120,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, uint8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_tu(
@@ -129,7 +129,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_tu(
@@ -138,7 +138,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_tu(
@@ -147,7 +147,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_tu(
@@ -156,7 +156,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, uint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_tu(
@@ -165,7 +165,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_tu(
@@ -174,7 +174,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_tu(
@@ -183,7 +183,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_tu(
@@ -192,7 +192,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_tu(
@@ -201,7 +201,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tu(
@@ -210,7 +210,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tu(
@@ -219,7 +219,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_tu(
@@ -228,7 +228,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, uint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_tu(
@@ -237,7 +237,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_tu(
@@ -246,7 +246,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_tu(
@@ -255,7 +255,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_tu(
@@ -264,7 +264,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_tu(
@@ -273,7 +273,7 @@ vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_tum(
@@ -282,7 +282,7 @@ vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_tum(
@@ -291,7 +291,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_tum(
@@ -300,7 +300,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_tum(
@@ -309,7 +309,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_tum(
@@ -318,7 +318,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_tum(
@@ -327,7 +327,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_tum(
@@ -336,7 +336,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_tum(
@@ -345,7 +345,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_tum(
@@ -354,7 +354,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_tum(
@@ -363,7 +363,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_tum(
@@ -372,7 +372,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_tum(
@@ -381,7 +381,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_tum(
@@ -390,7 +390,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_tum(
@@ -399,7 +399,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_tum(
@@ -408,7 +408,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vi
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_tum(
@@ -417,7 +417,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_tum(
@@ -426,7 +426,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_tum(
@@ -435,7 +435,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_tum(
@@ -444,7 +444,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_tum(
@@ -453,7 +453,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_tum(
@@ -462,7 +462,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_tum(
@@ -471,7 +471,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tum(
@@ -480,7 +480,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tum(
@@ -489,7 +489,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_tum(
@@ -498,7 +498,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_tum(
@@ -507,7 +507,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_tum(
@@ -516,7 +516,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_tum(
@@ -525,7 +525,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_tum(
@@ -534,7 +534,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_tum(
@@ -543,7 +543,7 @@ vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_tumu(
@@ -552,7 +552,7 @@ vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_tumu(
@@ -561,7 +561,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_tumu(
@@ -570,7 +570,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_tumu(
@@ -579,7 +579,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_tumu(
@@ -588,7 +588,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_tumu(
@@ -597,7 +597,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_tumu(
@@ -606,7 +606,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_tumu(
@@ -615,7 +615,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_tumu(
@@ -624,7 +624,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_tumu(
@@ -633,7 +633,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_tumu(
@@ -642,7 +642,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_tumu(
@@ -651,7 +651,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_tumu(
@@ -660,7 +660,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_tumu(
@@ -669,7 +669,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_tumu(
@@ -678,7 +678,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_tumu(
@@ -687,7 +687,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_tumu(
@@ -696,7 +696,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_tumu(
@@ -705,7 +705,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_tumu(
@@ -714,7 +714,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_tumu(
@@ -723,7 +723,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_tumu(
@@ -732,7 +732,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_tumu(
@@ -741,7 +741,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tumu(
@@ -750,7 +750,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tumu(
@@ -759,7 +759,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_tumu(
@@ -768,7 +768,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_tumu(
@@ -777,7 +777,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_tumu(
@@ -786,7 +786,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_tumu(
@@ -795,7 +795,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_tumu(
@@ -804,7 +804,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_tumu(
@@ -813,7 +813,7 @@ vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_mu(
@@ -822,7 +822,7 @@ vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_mu(
@@ -831,7 +831,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_mu(
@@ -840,7 +840,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_mu(
@@ -849,7 +849,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_mu(
@@ -858,7 +858,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_mu(
@@ -867,7 +867,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_mu(
@@ -876,7 +876,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_mu(
@@ -885,7 +885,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmulsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_mu(
@@ -894,7 +894,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_mu(
@@ -903,7 +903,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_mu(
@@ -912,7 +912,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_mu(
@@ -921,7 +921,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_mu(
@@ -930,7 +930,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_mu(
@@ -939,7 +939,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_mu(
@@ -948,7 +948,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_mu(
@@ -957,7 +957,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_mu(
@@ -966,7 +966,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_mu(
@@ -975,7 +975,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_mu(
@@ -984,7 +984,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_mu(
@@ -993,7 +993,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_mu(
@@ -1002,7 +1002,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_mu(
@@ -1011,7 +1011,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmulsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_mu(
@@ -1020,7 +1020,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_mu(
@@ -1029,7 +1029,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_mu(
@@ -1038,7 +1038,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_mu(
@@ -1047,7 +1047,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_mu(
@@ -1056,7 +1056,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_mu(
@@ -1065,7 +1065,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_mu(
@@ -1074,7 +1074,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_mu(
@@ -1083,6 +1083,6 @@ vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulsu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl);
}
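(Editor's note, not part of the patch: a minimal caller sketch showing how the renamed overloaded intrinsic reads after this change. The helper name widen_mul_su is hypothetical; the intrinsic name, operand types, and argument order are taken verbatim from the test_vwmulsu_vv_i16m1_tumu case above.)

#include <riscv_vector.h>

// Hypothetical helper: widening signed*unsigned multiply under the
// tail-undisturbed/mask-undisturbed (tumu) policy. Overload resolution
// selects the i16m1 variant from the operand types; only the __riscv_
// prefix is new relative to the pre-patch spelling vwmulsu_tumu.
vint16m1_t widen_mul_su(vbool16_t mask, vint16m1_t acc,
                        vint8mf2_t lhs, vuint8mf2_t rhs, size_t vl) {
  return __riscv_vwmulsu_tumu(mask, acc, lhs, rhs, vl);
}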
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulu.c
index d4f93dfff38c..01b5fcaf4b13 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmulu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_tu(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_tu(
@@ -30,7 +30,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_tu(
@@ -39,7 +39,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_tu(
@@ -48,7 +48,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_tu(
@@ -57,7 +57,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_tu(
@@ -66,7 +66,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_tu(
@@ -75,7 +75,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_tu(
@@ -84,7 +84,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_tu(
@@ -93,7 +93,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_tu(
@@ -102,7 +102,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_tu(
@@ -111,7 +111,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_tu(
@@ -120,7 +120,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_tu(
@@ -129,7 +129,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_tu(
@@ -138,7 +138,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_tu(
@@ -147,7 +147,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_tu(
@@ -156,7 +156,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_tu(
@@ -165,7 +165,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_tu(
@@ -174,7 +174,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_tu(
@@ -183,7 +183,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_tu(
@@ -192,7 +192,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_tu(
@@ -201,7 +201,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tu(
@@ -210,7 +210,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tu(
@@ -219,7 +219,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_tu(
@@ -228,7 +228,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_tu(
@@ -237,7 +237,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_tu(
@@ -246,7 +246,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_tu(
@@ -255,7 +255,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_tu(
@@ -264,7 +264,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_tu(
@@ -273,7 +273,7 @@ vuint64m8_t test_vwmulu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_tum(
@@ -282,7 +282,7 @@ vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_tum(
@@ -291,7 +291,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_tum(
@@ -300,7 +300,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_tum(
@@ -309,7 +309,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_tum(
@@ -318,7 +318,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_tum(
@@ -327,7 +327,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_tum(
@@ -336,7 +336,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_tum(
@@ -345,7 +345,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_tum(
@@ -354,7 +354,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_tum(
@@ -363,7 +363,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_tum(
@@ -372,7 +372,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_tum(
@@ -381,7 +381,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_tum(
@@ -390,7 +390,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_tum(
@@ -399,7 +399,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_tum(
@@ -408,7 +408,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_tum(
@@ -417,7 +417,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_tum(
@@ -426,7 +426,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_tum(
@@ -435,7 +435,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_tum(
@@ -444,7 +444,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_tum(
@@ -453,7 +453,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_tum(
@@ -462,7 +462,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_tum(
@@ -471,7 +471,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tum(
@@ -480,7 +480,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tum(
@@ -489,7 +489,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_tum(
@@ -498,7 +498,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_tum(
@@ -507,7 +507,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_tum(
@@ -516,7 +516,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_tum(
@@ -525,7 +525,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_tum(
@@ -534,7 +534,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_tum(
@@ -543,7 +543,7 @@ vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_tumu(
@@ -552,7 +552,7 @@ vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_tumu(
@@ -561,7 +561,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_tumu(
@@ -570,7 +570,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_tumu(
@@ -579,7 +579,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_tumu(
@@ -588,7 +588,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_tumu(
@@ -597,7 +597,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_tumu(
@@ -606,7 +606,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_tumu(
@@ -615,7 +615,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_tumu(
@@ -624,7 +624,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_tumu(
@@ -633,7 +633,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_tumu(
@@ -642,7 +642,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_tumu(
@@ -651,7 +651,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_tumu(
@@ -660,7 +660,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_tumu(
@@ -669,7 +669,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_tumu(
@@ -678,7 +678,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_tumu(
@@ -687,7 +687,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_tumu(
@@ -696,7 +696,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_tumu(
@@ -705,7 +705,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_tumu(
@@ -714,7 +714,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_tumu(
@@ -723,7 +723,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_tumu(
@@ -732,7 +732,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_tumu(
@@ -741,7 +741,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tumu(
@@ -750,7 +750,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tumu(
@@ -759,7 +759,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_tumu(
@@ -768,7 +768,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_tumu(
@@ -777,7 +777,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_tumu(
@@ -786,7 +786,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_tumu(
@@ -795,7 +795,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_tumu(
@@ -804,7 +804,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_tumu(
@@ -813,7 +813,7 @@ vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulu_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_mu(
@@ -822,7 +822,7 @@ vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_mu(
@@ -831,7 +831,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_mu(
@@ -840,7 +840,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_mu(
@@ -849,7 +849,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_mu(
@@ -858,7 +858,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_mu(
@@ -867,7 +867,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_mu(
@@ -876,7 +876,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_mu(
@@ -885,7 +885,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_mu(
@@ -894,7 +894,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_mu(
@@ -903,7 +903,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_mu(
@@ -912,7 +912,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_mu(
@@ -921,7 +921,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_mu(
@@ -930,7 +930,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_mu(
@@ -939,7 +939,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_mu(
@@ -948,7 +948,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_mu(
@@ -957,7 +957,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_mu(
@@ -966,7 +966,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_mu(
@@ -975,7 +975,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_mu(
@@ -984,7 +984,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_mu(
@@ -993,7 +993,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_mu(
@@ -1002,7 +1002,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_mu(
@@ -1011,7 +1011,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_mu(
@@ -1020,7 +1020,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_mu(
@@ -1029,7 +1029,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_mu(
@@ -1038,7 +1038,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_mu(
@@ -1047,7 +1047,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_mu(
@@ -1056,7 +1056,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_mu(
@@ -1065,7 +1065,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_mu(
@@ -1074,7 +1074,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_mu(
@@ -1083,6 +1083,6 @@ vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmulu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwmulu_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
}
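For reference, a minimal caller-side sketch of what this rename means for user code. The vector types and the overloaded `_mu` signature are copied verbatim from the tests above; the wrapper name `widening_mul_mu` is purely illustrative, and a V-extension target with <riscv_vector.h> is assumed:

    #include <riscv_vector.h>

    // Illustrative wrapper: prior to this patch the overloaded call was
    // spelled vwmulu_mu(...); it now carries the __riscv_ prefix.
    vuint16m2_t widening_mul_mu(vbool8_t mask, vuint16m2_t maskedoff,
                                vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
      return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl);
    }

Overload resolution is unchanged: the element width and LMUL are still deduced from the argument types, only the spelling of the entry point differs.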
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsum.c
index cb1260cff8e4..e197a90a3875 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsum.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tu(
@@ -21,7 +21,7 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t maskedoff, vint8mf8_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tu(
@@ -30,7 +30,7 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t maskedoff, vint8mf4_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tu(
@@ -39,7 +39,7 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tu(
@@ -48,7 +48,7 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t maskedoff, vint8m1_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tu(
@@ -57,7 +57,7 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t maskedoff, vint8m2_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_tu(
@@ -66,7 +66,7 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t maskedoff, vint8m4_t vector
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tu(
@@ -75,7 +75,7 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t maskedoff, vint8m8_t vector
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tu(
@@ -84,7 +84,7 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t maskedoff, vint16mf4_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tu(
@@ -93,7 +93,7 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tu(
@@ -102,7 +102,7 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t maskedoff, vint16m1_t vect
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tu(
@@ -111,7 +111,7 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t maskedoff, vint16m2_t vect
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tu(
@@ -120,7 +120,7 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t maskedoff, vint16m4_t vect
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tu(
@@ -129,7 +129,7 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t maskedoff, vint16m8_t vect
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tu(
@@ -138,7 +138,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t ve
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tu(
@@ -147,7 +147,7 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t maskedoff, vint32m1_t vect
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tu(
@@ -156,7 +156,7 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t maskedoff, vint32m2_t vect
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tu(
@@ -165,7 +165,7 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t maskedoff, vint32m4_t vect
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_tum(
@@ -174,7 +174,7 @@ vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t maskedoff, vint32m8_t vect
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tum(
@@ -183,7 +183,7 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tum(
@@ -192,7 +192,7 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tum(
@@ -201,7 +201,7 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tum(
@@ -210,7 +210,7 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tum(
@@ -219,7 +219,7 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_tum(
@@ -228,7 +228,7 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tum(
@@ -237,7 +237,7 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t mask, vint16m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tum(
@@ -246,7 +246,7 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t mask, vint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tum(
@@ -255,7 +255,7 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tum(
@@ -264,7 +264,7 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tum(
@@ -273,7 +273,7 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tum(
@@ -282,7 +282,7 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum(
@@ -291,7 +291,7 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t mask, vint32m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tum(
@@ -300,7 +300,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tum(
@@ -309,7 +309,7 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tum(
@@ -318,7 +318,7 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tum(
@@ -327,6 +327,6 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
- return vwredsum_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl);
}
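The same pattern applies to the widening reductions. A short sketch under the same assumptions (wrapper name illustrative, the overloaded `_tu` signature taken from the tests above):

    #include <riscv_vector.h>

    // Tail-undisturbed widening reduction; formerly vwredsum_tu(...).
    vint16m1_t widening_redsum_tu(vint16m1_t maskedoff, vint8m1_t vector,
                                  vint16m1_t scalar, size_t vl) {
      return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl);
    }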
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsumu.c
index 273be2b8789f..d95c271d950d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsumu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwredsumu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tu(
@@ -21,7 +21,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t maskedoff, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tu(
@@ -30,7 +30,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t maskedoff, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tu(
@@ -39,7 +39,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_tu(
@@ -48,7 +48,7 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t maskedoff, vuint8m1_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tu(
@@ -57,7 +57,7 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t maskedoff, vuint8m2_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tu(
@@ -66,7 +66,7 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t maskedoff, vuint8m4_t ve
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tu(
@@ -75,7 +75,7 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t maskedoff, vuint8m8_t ve
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tu(
@@ -84,7 +84,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t maskedoff, vuint16mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tu(
@@ -93,7 +93,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tu(
@@ -102,7 +102,7 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t maskedoff, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tu(
@@ -111,7 +111,7 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t maskedoff, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tu(
@@ -120,7 +120,7 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t maskedoff, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tu(
@@ -129,7 +129,7 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t maskedoff, vuint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tu(
@@ -138,7 +138,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tu(
@@ -147,7 +147,7 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t maskedoff, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tu(
@@ -156,7 +156,7 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t maskedoff, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tu(
@@ -165,7 +165,7 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t maskedoff, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tu(maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_tum(
@@ -174,7 +174,7 @@ vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t maskedoff, vuint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tum(
@@ -183,7 +183,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t mask, vuint16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tum(
@@ -192,7 +192,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t mask, vuint16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tum(
@@ -201,7 +201,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t mask, vuint16m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_tum(
@@ -210,7 +210,7 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tum(
@@ -219,7 +219,7 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tum(
@@ -228,7 +228,7 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tum(
@@ -237,7 +237,7 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t mask, vuint16m1_t maskedof
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tum(
@@ -246,7 +246,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t mask, vuint32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tum(
@@ -255,7 +255,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t mask, vuint32m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tum(
@@ -264,7 +264,7 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t mask, vuint32m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tum(
@@ -273,7 +273,7 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tum(
@@ -282,7 +282,7 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum(
@@ -291,7 +291,7 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t mask, vuint32m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tum(
@@ -300,7 +300,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t maske
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tum(
@@ -309,7 +309,7 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t mask, vuint64m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tum(
@@ -318,7 +318,7 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t mask, vuint64m1_t masked
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tum(
@@ -327,6 +327,6 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t mask, vuint64m1_t maskedo
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
+ return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsub.c
index 22b79b45378e..0053dff6fcf4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsub.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_tu(
@@ -21,7 +21,7 @@ vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_tu(
@@ -30,7 +30,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_tu(
@@ -39,7 +39,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_tu(
@@ -48,7 +48,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_tu(
@@ -57,7 +57,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_tu(
@@ -66,7 +66,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_tu(
@@ -75,7 +75,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_tu(
@@ -84,7 +84,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_tu(
@@ -93,7 +93,7 @@ vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_tu(
@@ -102,7 +102,7 @@ vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_tu(
@@ -111,7 +111,7 @@ vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_tu(
@@ -120,7 +120,7 @@ vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_tu(
@@ -129,7 +129,7 @@ vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_tu(
@@ -138,7 +138,7 @@ vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_tu(
@@ -147,7 +147,7 @@ vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_tu(
@@ -156,7 +156,7 @@ vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_tu(
@@ -165,7 +165,7 @@ vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_tu(
@@ -174,7 +174,7 @@ vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_tu(
@@ -183,7 +183,7 @@ vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_tu(
@@ -192,7 +192,7 @@ vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_tu(
@@ -201,7 +201,7 @@ vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_tu(
@@ -210,7 +210,7 @@ vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_tu(
@@ -219,7 +219,7 @@ vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_tu(
@@ -228,7 +228,7 @@ vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_tu(
@@ -237,7 +237,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_tu(
@@ -291,7 +291,7 @@ vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_tu(
@@ -300,7 +300,7 @@ vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_tu(
@@ -309,7 +309,7 @@ vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_tu(
@@ -318,7 +318,7 @@ vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_tu(
@@ -327,7 +327,7 @@ vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_tu(
@@ -336,7 +336,7 @@ vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_tu(
@@ -345,7 +345,7 @@ vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_tu(
@@ -354,7 +354,7 @@ vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_tu(
@@ -363,7 +363,7 @@ vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_tu(
@@ -372,7 +372,7 @@ vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_tu(
@@ -381,7 +381,7 @@ vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_tu(
@@ -390,7 +390,7 @@ vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_tu(
@@ -399,7 +399,7 @@ vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tu(
@@ -408,7 +408,7 @@ vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tu(
@@ -417,7 +417,7 @@ vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tu(
@@ -426,7 +426,7 @@ vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tu(
@@ -435,7 +435,7 @@ vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_tu(
@@ -444,7 +444,7 @@ vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_tu(
@@ -453,7 +453,7 @@ vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_tu(
@@ -462,7 +462,7 @@ vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_tu(
@@ -471,7 +471,7 @@ vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_tu(
@@ -480,7 +480,7 @@ vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_tu(
@@ -489,7 +489,7 @@ vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_tu(
@@ -498,7 +498,7 @@ vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_tu(
@@ -507,7 +507,7 @@ vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_tu(
@@ -516,7 +516,7 @@ vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_tu(
@@ -525,7 +525,7 @@ vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_tu(
@@ -534,7 +534,7 @@ vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_tu(
@@ -543,7 +543,7 @@ vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_tum(
@@ -552,7 +552,7 @@ vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_tum(
@@ -561,7 +561,7 @@ vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_tum(
@@ -570,7 +570,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_tum(
@@ -579,7 +579,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_tum(
@@ -588,7 +588,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_tum(
@@ -597,7 +597,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_tum(
@@ -606,7 +606,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_tum(
@@ -615,7 +615,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_tum(
@@ -624,7 +624,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_tum(
@@ -633,7 +633,7 @@ vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_tum(
@@ -642,7 +642,7 @@ vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_tum(
@@ -651,7 +651,7 @@ vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_tum(
@@ -660,7 +660,7 @@ vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_tum(
@@ -669,7 +669,7 @@ vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_tum(
@@ -678,7 +678,7 @@ vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_tum(
@@ -687,7 +687,7 @@ vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_tum(
@@ -696,7 +696,7 @@ vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_tum(
@@ -705,7 +705,7 @@ vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_tum(
@@ -714,7 +714,7 @@ vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_tum(
@@ -723,7 +723,7 @@ vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_tum(
@@ -732,7 +732,7 @@ vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_tum(
@@ -741,7 +741,7 @@ vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_tum(
@@ -750,7 +750,7 @@ vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_tum(
@@ -759,7 +759,7 @@ vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_tum(
@@ -768,7 +768,7 @@ vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_tum(
@@ -777,7 +777,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_tum(
@@ -786,7 +786,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_tum(
@@ -795,7 +795,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_tum(
@@ -804,7 +804,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_tum(
@@ -813,7 +813,7 @@ vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_tum(
@@ -822,7 +822,7 @@ vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_tum(
@@ -831,7 +831,7 @@ vint32m1_t test_vwsub_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_tum(
@@ -840,7 +840,7 @@ vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_tum(
@@ -849,7 +849,7 @@ vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_tum(
@@ -858,7 +858,7 @@ vint32m2_t test_vwsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_tum(
@@ -867,7 +867,7 @@ vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_tum(
@@ -876,7 +876,7 @@ vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_tum(
@@ -885,7 +885,7 @@ vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_tum(
@@ -894,7 +894,7 @@ vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_tum(
@@ -903,7 +903,7 @@ vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_tum(
@@ -912,7 +912,7 @@ vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_tum(
@@ -921,7 +921,7 @@ vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_tum(
@@ -930,7 +930,7 @@ vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_tum(
@@ -939,7 +939,7 @@ vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tum(
@@ -948,7 +948,7 @@ vint32m8_t test_vwsub_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tum(
@@ -957,7 +957,7 @@ vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tum(
@@ -966,7 +966,7 @@ vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tum(
@@ -975,7 +975,7 @@ vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_tum(
@@ -984,7 +984,7 @@ vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_tum(
@@ -993,7 +993,7 @@ vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_tum(
@@ -1002,7 +1002,7 @@ vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_tum(
@@ -1011,7 +1011,7 @@ vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_tum(
@@ -1020,7 +1020,7 @@ vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_tum(
@@ -1029,7 +1029,7 @@ vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_tum(
@@ -1038,7 +1038,7 @@ vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_tum(
@@ -1047,7 +1047,7 @@ vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_tum(
@@ -1056,7 +1056,7 @@ vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_tum(
@@ -1065,7 +1065,7 @@ vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_tum(
@@ -1074,7 +1074,7 @@ vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_tum(
@@ -1083,7 +1083,7 @@ vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_tumu(
@@ -1092,7 +1092,7 @@ vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_tumu(
@@ -1101,7 +1101,7 @@ vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_tumu(
@@ -1110,7 +1110,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_tumu(
@@ -1119,7 +1119,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_tumu(
@@ -1128,7 +1128,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_tumu(
@@ -1137,7 +1137,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_tumu(
@@ -1146,7 +1146,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_tumu(
@@ -1155,7 +1155,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_tumu(
@@ -1164,7 +1164,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_tumu(
@@ -1173,7 +1173,7 @@ vint16m1_t test_vwsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_tumu(
@@ -1182,7 +1182,7 @@ vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_tumu(
@@ -1191,7 +1191,7 @@ vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_tumu(
@@ -1200,7 +1200,7 @@ vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_tumu(
@@ -1209,7 +1209,7 @@ vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_tumu(
@@ -1218,7 +1218,7 @@ vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_tumu(
@@ -1227,7 +1227,7 @@ vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_tumu(
@@ -1236,7 +1236,7 @@ vint16m2_t test_vwsub_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_tumu(
@@ -1245,7 +1245,7 @@ vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_tumu(
@@ -1254,7 +1254,7 @@ vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_tumu(
@@ -1263,7 +1263,7 @@ vint16m4_t test_vwsub_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_tumu(
@@ -1272,7 +1272,7 @@ vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_tumu(
@@ -1281,7 +1281,7 @@ vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_tumu(
@@ -1290,7 +1290,7 @@ vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_tumu(
@@ -1299,7 +1299,7 @@ vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_tumu(
@@ -1308,7 +1308,7 @@ vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_tumu(
@@ -1317,7 +1317,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_tumu(
@@ -1326,7 +1326,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_tumu(
@@ -1335,7 +1335,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_tumu(
@@ -1344,7 +1344,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_tumu(
@@ -1353,7 +1353,7 @@ vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_tumu(
@@ -1362,7 +1362,7 @@ vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_tumu(
@@ -1371,7 +1371,7 @@ vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_tumu(
@@ -1380,7 +1380,7 @@ vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_tumu(
@@ -1389,7 +1389,7 @@ vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_tumu(
@@ -1398,7 +1398,7 @@ vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_tumu(
@@ -1407,7 +1407,7 @@ vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_tumu(
@@ -1416,7 +1416,7 @@ vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_tumu(
@@ -1425,7 +1425,7 @@ vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_tumu(
@@ -1434,7 +1434,7 @@ vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_tumu(
@@ -1443,7 +1443,7 @@ vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_tumu(
@@ -1452,7 +1452,7 @@ vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_tumu(
@@ -1461,7 +1461,7 @@ vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_tumu(
@@ -1470,7 +1470,7 @@ vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_tumu(
@@ -1479,7 +1479,7 @@ vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tumu(
@@ -1488,7 +1488,7 @@ vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tumu(
@@ -1497,7 +1497,7 @@ vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tumu(
@@ -1506,7 +1506,7 @@ vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tumu(
@@ -1515,7 +1515,7 @@ vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_tumu(
@@ -1524,7 +1524,7 @@ vint64m1_t test_vwsub_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_tumu(
@@ -1533,7 +1533,7 @@ vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_tumu(
@@ -1542,7 +1542,7 @@ vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_tumu(
@@ -1551,7 +1551,7 @@ vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_tumu(
@@ -1560,7 +1560,7 @@ vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_tumu(
@@ -1569,7 +1569,7 @@ vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_tumu(
@@ -1578,7 +1578,7 @@ vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_tumu(
@@ -1587,7 +1587,7 @@ vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_tumu(
@@ -1596,7 +1596,7 @@ vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_tumu(
@@ -1605,7 +1605,7 @@ vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_tumu(
@@ -1614,7 +1614,7 @@ vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_tumu(
@@ -1623,7 +1623,7 @@ vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_mu(
@@ -1632,7 +1632,7 @@ vint64m8_t test_vwsub_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_mu(
@@ -1641,7 +1641,7 @@ vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_mu(
@@ -1650,7 +1650,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_mu(
@@ -1659,7 +1659,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_mu(
@@ -1668,7 +1668,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_mu(
@@ -1677,7 +1677,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_mu(
@@ -1686,7 +1686,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_mu(
@@ -1695,7 +1695,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_mu(
@@ -1704,7 +1704,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_mu(
@@ -1713,7 +1713,7 @@ vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_mu(
@@ -1722,7 +1722,7 @@ vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_mu(
@@ -1731,7 +1731,7 @@ vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_mu(
@@ -1740,7 +1740,7 @@ vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_mu(
@@ -1749,7 +1749,7 @@ vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_mu(
@@ -1758,7 +1758,7 @@ vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_mu(
@@ -1767,7 +1767,7 @@ vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_mu(
@@ -1776,7 +1776,7 @@ vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_mu(
@@ -1785,7 +1785,7 @@ vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_mu(
@@ -1794,7 +1794,7 @@ vint16m4_t test_vwsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_mu(
@@ -1803,7 +1803,7 @@ vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_mu(
@@ -1812,7 +1812,7 @@ vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_mu(
@@ -1821,7 +1821,7 @@ vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_mu(
@@ -1830,7 +1830,7 @@ vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_mu(
@@ -1839,7 +1839,7 @@ vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_mu(
@@ -1848,7 +1848,7 @@ vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_mu(
@@ -1857,7 +1857,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_mu(
@@ -1866,7 +1866,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_mu(
@@ -1875,7 +1875,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_mu(
@@ -1884,7 +1884,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_mu(
@@ -1893,7 +1893,7 @@ vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_mu(
@@ -1902,7 +1902,7 @@ vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_mu(
@@ -1911,7 +1911,7 @@ vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_mu(
@@ -1920,7 +1920,7 @@ vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_mu(
@@ -1929,7 +1929,7 @@ vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_mu(
@@ -1938,7 +1938,7 @@ vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_mu(
@@ -1947,7 +1947,7 @@ vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_mu(
@@ -1956,7 +1956,7 @@ vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_mu(
@@ -1965,7 +1965,7 @@ vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_mu(
@@ -1974,7 +1974,7 @@ vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_mu(
@@ -1983,7 +1983,7 @@ vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_mu(
@@ -1992,7 +1992,7 @@ vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_mu(
@@ -2001,7 +2001,7 @@ vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_mu(
@@ -2010,7 +2010,7 @@ vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_mu(
@@ -2019,7 +2019,7 @@ vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_mu(
@@ -2028,7 +2028,7 @@ vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_mu(
@@ -2037,7 +2037,7 @@ vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_mu(
@@ -2046,7 +2046,7 @@ vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_mu(
@@ -2055,7 +2055,7 @@ vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_mu(
@@ -2064,7 +2064,7 @@ vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_mu(
@@ -2073,7 +2073,7 @@ vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_mu(
@@ -2082,7 +2082,7 @@ vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_mu(
@@ -2091,7 +2091,7 @@ vint64m2_t test_vwsub_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_mu(
@@ -2100,7 +2100,7 @@ vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_mu(
@@ -2109,7 +2109,7 @@ vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_mu(
@@ -2118,7 +2118,7 @@ vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_mu(
@@ -2127,7 +2127,7 @@ vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_mu(
@@ -2136,7 +2136,7 @@ vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_mu(
@@ -2145,7 +2145,7 @@ vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_mu(
@@ -2154,7 +2154,7 @@ vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_mu(
@@ -2163,6 +2163,6 @@ vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwsub_wx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
}
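
The hunks above and below follow one mechanical pattern, visible in each -/+ pair: every overloaded RVV intrinsic call gains the __riscv_ prefix while its policy suffix (_tu, _tumu, _mu) is left untouched. A minimal caller sketch of the renamed overloaded API, mirroring test_vwsub_vv_i32m1_tumu above; the wrapper name widen_sub_tumu is hypothetical, and it assumes <riscv_vector.h> from a toolchain that already carries this rename:

#include <riscv_vector.h>

// Tail-undisturbed/mask-undisturbed widening subtract: tail and inactive
// elements are taken from maskedoff, active elements from op1 - op2
// widened to twice the source element width.
vint32m1_t widen_sub_tumu(vbool32_t mask, vint32m1_t maskedoff,
                          vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  // Overloaded form: the name carries no type suffix; the element type and
  // LMUL variant are resolved from the argument types, exactly as in the
  // autogenerated tests above.
  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
}
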
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsubu.c
index 609567af349b..7496c77d54bd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsubu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsubu.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_tu(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_tu(
@@ -30,7 +30,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_tu(
@@ -39,7 +39,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_tu(
@@ -48,7 +48,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_tu(
@@ -57,7 +57,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_tu(
@@ -66,7 +66,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, u
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_tu(
@@ -75,7 +75,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_tu(
@@ -84,7 +84,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_tu(
@@ -93,7 +93,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_tu(
@@ -102,7 +102,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_tu(
@@ -111,7 +111,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_tu(
@@ -120,7 +120,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_tu(
@@ -129,7 +129,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_tu(
@@ -138,7 +138,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_tu(
@@ -147,7 +147,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_tu(
@@ -156,7 +156,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_tu(
@@ -165,7 +165,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_tu(
@@ -174,7 +174,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_tu(
@@ -183,7 +183,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_tu(
@@ -192,7 +192,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_tu(
@@ -201,7 +201,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_tu(
@@ -210,7 +210,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_tu(
@@ -219,7 +219,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_tu(
@@ -228,7 +228,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_tu(
@@ -237,7 +237,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_tu(
@@ -246,7 +246,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_tu(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_tu(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_tu(
@@ -273,7 +273,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_tu(
@@ -282,7 +282,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_tu(
@@ -291,7 +291,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_tu(
@@ -300,7 +300,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_tu(
@@ -309,7 +309,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_tu(
@@ -318,7 +318,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_tu(
@@ -327,7 +327,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_tu(
@@ -336,7 +336,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_tu(
@@ -345,7 +345,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_tu(
@@ -354,7 +354,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_tu(
@@ -363,7 +363,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_tu(
@@ -372,7 +372,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_tu(
@@ -381,7 +381,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_tu(
@@ -390,7 +390,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_tu(
@@ -399,7 +399,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tu(
@@ -408,7 +408,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tu(
@@ -417,7 +417,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tu(
@@ -426,7 +426,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tu(
@@ -435,7 +435,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_tu(
@@ -444,7 +444,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_tu(
@@ -453,7 +453,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_tu(
@@ -462,7 +462,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_tu(
@@ -471,7 +471,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_tu(
@@ -480,7 +480,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_tu(
@@ -489,7 +489,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_tu(
@@ -498,7 +498,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_tu(
@@ -507,7 +507,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_tu(
@@ -516,7 +516,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_vv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_tu(
@@ -525,7 +525,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_tu(
@@ -534,7 +534,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_wv_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_tu(
@@ -543,7 +543,7 @@ vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tu(maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_tum(
@@ -552,7 +552,7 @@ vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_tum(
@@ -561,7 +561,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_tum(
@@ -570,7 +570,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_tum(
@@ -579,7 +579,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_tum(
@@ -588,7 +588,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_tum(
@@ -597,7 +597,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_tum(
@@ -606,7 +606,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_tum(
@@ -615,7 +615,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_tum(
@@ -624,7 +624,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_tum(
@@ -633,7 +633,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_tum(
@@ -642,7 +642,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_tum(
@@ -651,7 +651,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_tum(
@@ -660,7 +660,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_tum(
@@ -669,7 +669,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_tum(
@@ -678,7 +678,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_tum(
@@ -687,7 +687,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_tum(
@@ -696,7 +696,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_tum(
@@ -705,7 +705,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_tum(
@@ -714,7 +714,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_tum(
@@ -723,7 +723,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_tum(
@@ -732,7 +732,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_tum(
@@ -741,7 +741,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_tum(
@@ -750,7 +750,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_tum(
@@ -759,7 +759,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_tum(
@@ -768,7 +768,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_tum(
@@ -777,7 +777,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_tum(
@@ -786,7 +786,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_tum(
@@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_tum(
@@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_tum(
@@ -813,7 +813,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_tum(
@@ -822,7 +822,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_tum(
@@ -831,7 +831,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_tum(
@@ -840,7 +840,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_tum(
@@ -849,7 +849,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_tum(
@@ -858,7 +858,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_tum(
@@ -867,7 +867,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_tum(
@@ -876,7 +876,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_tum(
@@ -885,7 +885,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_tum(
@@ -894,7 +894,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_tum(
@@ -903,7 +903,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_tum(
@@ -912,7 +912,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_tum(
@@ -921,7 +921,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_tum(
@@ -930,7 +930,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_tum(
@@ -939,7 +939,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tum(
@@ -948,7 +948,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tum(
@@ -957,7 +957,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tum(
@@ -966,7 +966,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tum(
@@ -975,7 +975,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_tum(
@@ -984,7 +984,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_tum(
@@ -993,7 +993,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_tum(
@@ -1002,7 +1002,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_tum(
@@ -1011,7 +1011,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_tum(
@@ -1020,7 +1020,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_tum(
@@ -1029,7 +1029,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_tum(
@@ -1038,7 +1038,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_tum(
@@ -1047,7 +1047,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_tum(
@@ -1056,7 +1056,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_tum(
@@ -1065,7 +1065,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_tum(
@@ -1074,7 +1074,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_tum(
@@ -1083,7 +1083,7 @@ vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_tumu(
@@ -1092,7 +1092,7 @@ vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_tumu(
@@ -1101,7 +1101,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_tumu(
@@ -1110,7 +1110,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_tumu(
@@ -1119,7 +1119,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_tumu(
@@ -1128,7 +1128,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_tumu(
@@ -1137,7 +1137,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_tumu(
@@ -1146,7 +1146,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_tumu(
@@ -1155,7 +1155,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_tumu(
@@ -1164,7 +1164,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_tumu(
@@ -1173,7 +1173,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_tumu(
@@ -1182,7 +1182,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_tumu(
@@ -1191,7 +1191,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_tumu(
@@ -1200,7 +1200,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_tumu(
@@ -1209,7 +1209,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_tumu(
@@ -1218,7 +1218,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_tumu(
@@ -1227,7 +1227,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_tumu(
@@ -1236,7 +1236,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_tumu(
@@ -1245,7 +1245,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_tumu(
@@ -1254,7 +1254,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_tumu(
@@ -1263,7 +1263,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_tumu(
@@ -1272,7 +1272,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_tumu(
@@ -1281,7 +1281,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_tumu(
@@ -1290,7 +1290,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_tumu(
@@ -1299,7 +1299,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_tumu(
@@ -1308,7 +1308,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_tumu(
@@ -1317,7 +1317,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_tumu(
@@ -1326,7 +1326,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_tumu(
@@ -1335,7 +1335,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_tumu(
@@ -1344,7 +1344,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_tumu(
@@ -1353,7 +1353,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_tumu(
@@ -1362,7 +1362,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_tumu(
@@ -1371,7 +1371,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_tumu(
@@ -1380,7 +1380,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_tumu(
@@ -1389,7 +1389,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_tumu(
@@ -1398,7 +1398,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_tumu(
@@ -1407,7 +1407,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_tumu(
@@ -1416,7 +1416,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_tumu(
@@ -1425,7 +1425,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_tumu(
@@ -1434,7 +1434,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_tumu(
@@ -1443,7 +1443,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_tumu(
@@ -1452,7 +1452,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_tumu(
@@ -1461,7 +1461,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_tumu(
@@ -1470,7 +1470,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_tumu(
@@ -1479,7 +1479,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tumu(
@@ -1488,7 +1488,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tumu(
@@ -1497,7 +1497,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tumu(
@@ -1506,7 +1506,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tumu(
@@ -1515,7 +1515,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_tumu(
@@ -1524,7 +1524,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_tumu(
@@ -1533,7 +1533,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_tumu(
@@ -1542,7 +1542,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_tumu(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_tumu(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_tumu(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_tumu(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_tumu(
@@ -1587,7 +1587,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_tumu(
@@ -1596,7 +1596,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_tumu(
@@ -1605,7 +1605,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_tumu(
@@ -1614,7 +1614,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_tumu(
@@ -1623,7 +1623,7 @@ vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_mu(
@@ -1632,7 +1632,7 @@ vuint64m8_t test_vwsubu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_mu(
@@ -1641,7 +1641,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_mu(
@@ -1650,7 +1650,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_mu(
@@ -1659,7 +1659,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_mu(
@@ -1668,7 +1668,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_mu(
@@ -1677,7 +1677,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_mu(
@@ -1686,7 +1686,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_mu(
@@ -1695,7 +1695,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_mu(
@@ -1704,7 +1704,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_mu(
@@ -1713,7 +1713,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_mu(
@@ -1722,7 +1722,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_mu(
@@ -1731,7 +1731,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_mu(
@@ -1740,7 +1740,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_mu(
@@ -1749,7 +1749,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_mu(
@@ -1758,7 +1758,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_mu(
@@ -1767,7 +1767,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_mu(
@@ -1776,7 +1776,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_mu(
@@ -1785,7 +1785,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_mu(
@@ -1794,7 +1794,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_mu(
@@ -1803,7 +1803,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_mu(
@@ -1812,7 +1812,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_mu(
@@ -1821,7 +1821,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_mu(
@@ -1830,7 +1830,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_mu(
@@ -1839,7 +1839,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwsubu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_mu(
@@ -1848,7 +1848,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_mu(
@@ -1857,7 +1857,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_mu(
@@ -1866,7 +1866,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_mu(
@@ -1875,7 +1875,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_mu(
@@ -1884,7 +1884,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_mu(
@@ -1893,7 +1893,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_mu(
@@ -1902,7 +1902,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_mu(
@@ -1911,7 +1911,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_mu(
@@ -1920,7 +1920,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_mu(
@@ -1929,7 +1929,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_mu(
@@ -1938,7 +1938,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_mu(
@@ -1947,7 +1947,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_mu(
@@ -1956,7 +1956,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_mu(
@@ -1965,7 +1965,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_mu(
@@ -1974,7 +1974,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_mu(
@@ -1983,7 +1983,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_mu(
@@ -1992,7 +1992,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_mu(
@@ -2001,7 +2001,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_mu(
@@ -2010,7 +2010,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_mu(
@@ -2019,7 +2019,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_mu(
@@ -2028,7 +2028,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_mu(
@@ -2037,7 +2037,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_mu(
@@ -2046,7 +2046,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_mu(
@@ -2055,7 +2055,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_mu(
@@ -2064,7 +2064,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_mu(
@@ -2073,7 +2073,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_mu(
@@ -2082,7 +2082,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_mu(
@@ -2091,7 +2091,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_mu(
@@ -2100,7 +2100,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_mu(
@@ -2109,7 +2109,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_mu(
@@ -2118,7 +2118,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_mu(
@@ -2127,7 +2127,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwsubu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_mu(
@@ -2136,7 +2136,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_mu(
@@ -2145,7 +2145,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_mu(
@@ -2154,7 +2154,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_mu(
@@ -2163,6 +2163,6 @@ vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vxor.c
index 1ef1dd96e4dd..d40a95e4d5b1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vxor.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_tu(
@@ -21,7 +21,7 @@ vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_tu(
@@ -30,7 +30,7 @@ vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_tu(
@@ -39,7 +39,7 @@ vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_tu(
@@ -48,7 +48,7 @@ vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_tu(
@@ -57,7 +57,7 @@ vint8mf2_t test_vxor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_tu(
@@ -66,7 +66,7 @@ vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_tu(
@@ -75,7 +75,7 @@ vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_tu(
@@ -84,7 +84,7 @@ vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_tu(
@@ -93,7 +93,7 @@ vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_tu(
@@ -102,7 +102,7 @@ vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_tu(
@@ -111,7 +111,7 @@ vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_tu(
@@ -120,7 +120,7 @@ vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_tu(
@@ -129,7 +129,7 @@ vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_tu(
@@ -138,7 +138,7 @@ vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_tu(
@@ -147,7 +147,7 @@ vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_tu(
@@ -156,7 +156,7 @@ vint16mf4_t test_vxor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_tu(
@@ -165,7 +165,7 @@ vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_tu(
@@ -174,7 +174,7 @@ vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_tu(
@@ -183,7 +183,7 @@ vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_tu(
@@ -192,7 +192,7 @@ vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_tu(
@@ -201,7 +201,7 @@ vint16m2_t test_vxor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_tu(
@@ -210,7 +210,7 @@ vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_tu(
@@ -219,7 +219,7 @@ vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_tu(
@@ -228,7 +228,7 @@ vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_tu(
@@ -237,7 +237,7 @@ vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tu(
@@ -246,7 +246,7 @@ vint16m8_t test_vxor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tu(
@@ -255,7 +255,7 @@ vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_tu(
@@ -264,7 +264,7 @@ vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_tu(
@@ -273,7 +273,7 @@ vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_tu(
@@ -282,7 +282,7 @@ vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_tu(
@@ -291,7 +291,7 @@ vint32m2_t test_vxor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_tu(
@@ -300,7 +300,7 @@ vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_tu(
@@ -309,7 +309,7 @@ vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_tu(
@@ -318,7 +318,7 @@ vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_tu(
@@ -327,7 +327,7 @@ vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_tu(
@@ -336,7 +336,7 @@ vint32m8_t test_vxor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_tu(
@@ -345,7 +345,7 @@ vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_tu(
@@ -354,7 +354,7 @@ vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_tu(
@@ -363,7 +363,7 @@ vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_tu(
@@ -372,7 +372,7 @@ vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_tu(
@@ -381,7 +381,7 @@ vint64m4_t test_vxor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_tu(
@@ -390,7 +390,7 @@ vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_tu(
@@ -399,7 +399,7 @@ vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_tu(
@@ -408,7 +408,7 @@ vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_tu(
@@ -417,7 +417,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_tu(
@@ -426,7 +426,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_tu(
@@ -435,7 +435,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_tu(
@@ -444,7 +444,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_tu(
@@ -453,7 +453,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_tu(
@@ -462,7 +462,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_tu(
@@ -471,7 +471,7 @@ vuint8m1_t test_vxor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_tu(
@@ -480,7 +480,7 @@ vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_tu(
@@ -489,7 +489,7 @@ vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_tu(
@@ -498,7 +498,7 @@ vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_tu(
@@ -507,7 +507,7 @@ vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_tu(
@@ -516,7 +516,7 @@ vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_tu(
@@ -525,7 +525,7 @@ vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_tu(
@@ -534,7 +534,7 @@ vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_tu(
@@ -543,7 +543,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_tu(
@@ -552,7 +552,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_tu(
@@ -561,7 +561,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_tu(
@@ -570,7 +570,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_tu(
@@ -579,7 +579,7 @@ vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_tu(
@@ -588,7 +588,7 @@ vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_tu(
@@ -597,7 +597,7 @@ vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_tu(
@@ -606,7 +606,7 @@ vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_tu(
@@ -615,7 +615,7 @@ vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_tu(
@@ -624,7 +624,7 @@ vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_tu(
@@ -633,7 +633,7 @@ vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tu(
@@ -642,7 +642,7 @@ vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tu(
@@ -651,7 +651,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_tu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_tu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_tu(
@@ -678,7 +678,7 @@ vuint32m1_t test_vxor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_tu(
@@ -687,7 +687,7 @@ vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_tu(
@@ -696,7 +696,7 @@ vuint32m2_t test_vxor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_tu(
@@ -705,7 +705,7 @@ vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_tu(
@@ -714,7 +714,7 @@ vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_tu(
@@ -723,7 +723,7 @@ vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_tu(
@@ -732,7 +732,7 @@ vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_tu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vxor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_tu(
@@ -750,7 +750,7 @@ vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_tu(
@@ -759,7 +759,7 @@ vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_tu(
@@ -768,7 +768,7 @@ vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_tu(
@@ -777,7 +777,7 @@ vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_tu(
@@ -786,7 +786,7 @@ vuint64m4_t test_vxor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_tu(
@@ -795,7 +795,7 @@ vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vxor_tu(maskedoff, op1, op2, vl);
+ return __riscv_vxor_tu(maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_tum(
@@ -804,7 +804,7 @@ vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_tum(
@@ -813,7 +813,7 @@ vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_tum(
@@ -822,7 +822,7 @@ vint8mf8_t test_vxor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_tum(
@@ -831,7 +831,7 @@ vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_tum(
@@ -840,7 +840,7 @@ vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_tum(
@@ -849,7 +849,7 @@ vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_tum(
@@ -858,7 +858,7 @@ vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_tum(
@@ -867,7 +867,7 @@ vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_tum(
@@ -876,7 +876,7 @@ vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_tum(
@@ -885,7 +885,7 @@ vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_tum(
@@ -894,7 +894,7 @@ vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_tum(
@@ -903,7 +903,7 @@ vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_tum(
@@ -912,7 +912,7 @@ vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_tum(
@@ -921,7 +921,7 @@ vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_tum(
@@ -930,7 +930,7 @@ vint8m8_t test_vxor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_tum(
@@ -939,7 +939,7 @@ vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_tum(
@@ -948,7 +948,7 @@ vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_tum(
@@ -957,7 +957,7 @@ vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_tum(
@@ -966,7 +966,7 @@ vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_tum(
@@ -975,7 +975,7 @@ vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_tum(
@@ -984,7 +984,7 @@ vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_tum(
@@ -993,7 +993,7 @@ vint16m2_t test_vxor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_tum(
@@ -1002,7 +1002,7 @@ vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_tum(
@@ -1011,7 +1011,7 @@ vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_tum(
@@ -1020,7 +1020,7 @@ vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_tum(
@@ -1029,7 +1029,7 @@ vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tum(
@@ -1038,7 +1038,7 @@ vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tum(
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_tum(
@@ -1056,7 +1056,7 @@ vint32mf2_t test_vxor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_tum(
@@ -1065,7 +1065,7 @@ vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_tum(
@@ -1074,7 +1074,7 @@ vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_tum(
@@ -1083,7 +1083,7 @@ vint32m2_t test_vxor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_tum(
@@ -1092,7 +1092,7 @@ vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_tum(
@@ -1101,7 +1101,7 @@ vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_tum(
@@ -1110,7 +1110,7 @@ vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_tum(
@@ -1119,7 +1119,7 @@ vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_tum(
@@ -1128,7 +1128,7 @@ vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_tum(
@@ -1137,7 +1137,7 @@ vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_tum(
@@ -1146,7 +1146,7 @@ vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_tum(
@@ -1155,7 +1155,7 @@ vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_tum(
@@ -1164,7 +1164,7 @@ vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_tum(
@@ -1173,7 +1173,7 @@ vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_tum(
@@ -1182,7 +1182,7 @@ vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_tum(
@@ -1191,7 +1191,7 @@ vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_tum(
@@ -1200,7 +1200,7 @@ vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_tum(
@@ -1209,7 +1209,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_tum(
@@ -1218,7 +1218,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_tum(
@@ -1227,7 +1227,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_tum(
@@ -1236,7 +1236,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_tum(
@@ -1245,7 +1245,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_tum(
@@ -1254,7 +1254,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_tum(
@@ -1263,7 +1263,7 @@ vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_tum(
@@ -1272,7 +1272,7 @@ vuint8m1_t test_vxor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_tum(
@@ -1281,7 +1281,7 @@ vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_tum(
@@ -1290,7 +1290,7 @@ vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_tum(
@@ -1299,7 +1299,7 @@ vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_tum(
@@ -1308,7 +1308,7 @@ vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_tum(
@@ -1317,7 +1317,7 @@ vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_tum(
@@ -1326,7 +1326,7 @@ vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_tum(
@@ -1335,7 +1335,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_tum(
@@ -1344,7 +1344,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_tum(
@@ -1353,7 +1353,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_tum(
@@ -1362,7 +1362,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_tum(
@@ -1371,7 +1371,7 @@ vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_tum(
@@ -1380,7 +1380,7 @@ vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_tum(
@@ -1389,7 +1389,7 @@ vuint16m2_t test_vxor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_tum(
@@ -1398,7 +1398,7 @@ vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_tum(
@@ -1407,7 +1407,7 @@ vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_tum(
@@ -1416,7 +1416,7 @@ vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_tum(
@@ -1425,7 +1425,7 @@ vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tum(
@@ -1434,7 +1434,7 @@ vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tum(
@@ -1443,7 +1443,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_tum(
@@ -1452,7 +1452,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_tum(
@@ -1461,7 +1461,7 @@ vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_tum(
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_tum(
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_tum(
@@ -1488,7 +1488,7 @@ vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_tum(
@@ -1497,7 +1497,7 @@ vuint32m4_t test_vxor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_tum(
@@ -1506,7 +1506,7 @@ vuint32m4_t test_vxor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_tum(
@@ -1515,7 +1515,7 @@ vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_tum(
@@ -1524,7 +1524,7 @@ vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_tum(
@@ -1533,7 +1533,7 @@ vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_tum(
@@ -1542,7 +1542,7 @@ vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_tum(
@@ -1551,7 +1551,7 @@ vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_tum(
@@ -1560,7 +1560,7 @@ vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_tum(
@@ -1569,7 +1569,7 @@ vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_tum(
@@ -1578,7 +1578,7 @@ vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_tum(
@@ -1587,7 +1587,7 @@ vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vxor_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_tumu(
@@ -1596,7 +1596,7 @@ vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_tumu(
@@ -1605,7 +1605,7 @@ vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_tumu(
@@ -1614,7 +1614,7 @@ vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_tumu(
@@ -1623,7 +1623,7 @@ vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_tumu(
@@ -1632,7 +1632,7 @@ vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_tumu(
@@ -1641,7 +1641,7 @@ vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_tumu(
@@ -1650,7 +1650,7 @@ vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_tumu(
@@ -1659,7 +1659,7 @@ vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_tumu(
@@ -1668,7 +1668,7 @@ vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_tumu(
@@ -1677,7 +1677,7 @@ vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_tumu(
@@ -1686,7 +1686,7 @@ vint8m2_t test_vxor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_tumu(
@@ -1695,7 +1695,7 @@ vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_tumu(
@@ -1704,7 +1704,7 @@ vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_tumu(
@@ -1713,7 +1713,7 @@ vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_tumu(
@@ -1722,7 +1722,7 @@ vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_tumu(
@@ -1731,7 +1731,7 @@ vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_tumu(
@@ -1740,7 +1740,7 @@ vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_tumu(
@@ -1749,7 +1749,7 @@ vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_tumu(
@@ -1758,7 +1758,7 @@ vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_tumu(
@@ -1767,7 +1767,7 @@ vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_tumu(
@@ -1776,7 +1776,7 @@ vint16m1_t test_vxor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_tumu(
@@ -1785,7 +1785,7 @@ vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_tumu(
@@ -1794,7 +1794,7 @@ vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_tumu(
@@ -1803,7 +1803,7 @@ vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_tumu(
@@ -1812,7 +1812,7 @@ vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_tumu(
@@ -1821,7 +1821,7 @@ vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tumu(
@@ -1830,7 +1830,7 @@ vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tumu(
@@ -1839,7 +1839,7 @@ vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_tumu(
@@ -1848,7 +1848,7 @@ vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_tumu(
@@ -1857,7 +1857,7 @@ vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_tumu(
@@ -1866,7 +1866,7 @@ vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_tumu(
@@ -1875,7 +1875,7 @@ vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_tumu(
@@ -1884,7 +1884,7 @@ vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_tumu(
@@ -1893,7 +1893,7 @@ vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_tumu(
@@ -1902,7 +1902,7 @@ vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_tumu(
@@ -1911,7 +1911,7 @@ vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_tumu(
@@ -1920,7 +1920,7 @@ vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_tumu(
@@ -1929,7 +1929,7 @@ vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_tumu(
@@ -1938,7 +1938,7 @@ vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_tumu(
@@ -1947,7 +1947,7 @@ vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_tumu(
@@ -1956,7 +1956,7 @@ vint64m2_t test_vxor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_tumu(
@@ -1965,7 +1965,7 @@ vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_tumu(
@@ -1974,7 +1974,7 @@ vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_tumu(
@@ -1983,7 +1983,7 @@ vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_tumu(
@@ -1992,7 +1992,7 @@ vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_tumu(
@@ -2001,7 +2001,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_tumu(
@@ -2010,7 +2010,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_tumu(
@@ -2019,7 +2019,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_tumu(
@@ -2028,7 +2028,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_tumu(
@@ -2037,7 +2037,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_tumu(
@@ -2046,7 +2046,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_tumu(
@@ -2055,7 +2055,7 @@ vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_tumu(
@@ -2064,7 +2064,7 @@ vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_tumu(
@@ -2073,7 +2073,7 @@ vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_tumu(
@@ -2082,7 +2082,7 @@ vuint8m2_t test_vxor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_tumu(
@@ -2091,7 +2091,7 @@ vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_tumu(
@@ -2100,7 +2100,7 @@ vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_tumu(
@@ -2109,7 +2109,7 @@ vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_tumu(
@@ -2118,7 +2118,7 @@ vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_tumu(
@@ -2127,7 +2127,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_tumu(
@@ -2136,7 +2136,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_tumu(
@@ -2145,7 +2145,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_tumu(
@@ -2154,7 +2154,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_tumu(
@@ -2163,7 +2163,7 @@ vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_tumu(
@@ -2172,7 +2172,7 @@ vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_tumu(
@@ -2181,7 +2181,7 @@ vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_tumu(
@@ -2190,7 +2190,7 @@ vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_tumu(
@@ -2199,7 +2199,7 @@ vuint16m4_t test_vxor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_tumu(
@@ -2208,7 +2208,7 @@ vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_tumu(
@@ -2217,7 +2217,7 @@ vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tumu(
@@ -2226,7 +2226,7 @@ vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tumu(
@@ -2235,7 +2235,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_tumu(
@@ -2244,7 +2244,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_tumu(
@@ -2253,7 +2253,7 @@ vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_tumu(
@@ -2262,7 +2262,7 @@ vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_tumu(
@@ -2271,7 +2271,7 @@ vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_tumu(
@@ -2280,7 +2280,7 @@ vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_tumu(
@@ -2289,7 +2289,7 @@ vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_tumu(
@@ -2298,7 +2298,7 @@ vuint32m4_t test_vxor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_tumu(
@@ -2307,7 +2307,7 @@ vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_tumu(
@@ -2316,7 +2316,7 @@ vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_tumu(
@@ -2325,7 +2325,7 @@ vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_tumu(
@@ -2334,7 +2334,7 @@ vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_tumu(
@@ -2343,7 +2343,7 @@ vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_tumu(
@@ -2352,7 +2352,7 @@ vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_tumu(
@@ -2361,7 +2361,7 @@ vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_tumu(
@@ -2370,7 +2370,7 @@ vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_tumu(
@@ -2379,7 +2379,7 @@ vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vxor_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
}
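// The _tumu tests above exercise the tail-undisturbed, mask-undisturbed policy
// overloads; the _mu tests below switch to the mask-undisturbed (tail-agnostic)
// policy. A minimal caller-side sketch, assuming <riscv_vector.h> provides the
// prefixed overloaded intrinsic exactly as tested here:
//   vint8mf8_t r = __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);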
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_mu(
@@ -2388,7 +2388,7 @@ vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_mu(
@@ -2397,7 +2397,7 @@ vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_mu(
@@ -2406,7 +2406,7 @@ vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_mu(
@@ -2415,7 +2415,7 @@ vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_mu(
@@ -2424,7 +2424,7 @@ vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_mu(
@@ -2433,7 +2433,7 @@ vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_mu(
@@ -2442,7 +2442,7 @@ vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_mu(
@@ -2451,7 +2451,7 @@ vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_mu(
@@ -2460,7 +2460,7 @@ vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_mu(
@@ -2469,7 +2469,7 @@ vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_mu(
@@ -2478,7 +2478,7 @@ vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_mu(
@@ -2487,7 +2487,7 @@ vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_mu(
@@ -2496,7 +2496,7 @@ vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_mu(
@@ -2505,7 +2505,7 @@ vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_mu(
@@ -2514,7 +2514,7 @@ vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_mu(
@@ -2523,7 +2523,7 @@ vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vxor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_mu(
@@ -2532,7 +2532,7 @@ vint16mf4_t test_vxor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_mu(
@@ -2541,7 +2541,7 @@ vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_mu(
@@ -2550,7 +2550,7 @@ vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_mu(
@@ -2559,7 +2559,7 @@ vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_mu(
@@ -2568,7 +2568,7 @@ vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_mu(
@@ -2577,7 +2577,7 @@ vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_mu(
@@ -2586,7 +2586,7 @@ vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_mu(
@@ -2595,7 +2595,7 @@ vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_mu(
@@ -2604,7 +2604,7 @@ vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_mu(
@@ -2613,7 +2613,7 @@ vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_mu(
@@ -2622,7 +2622,7 @@ vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_mu(
@@ -2631,7 +2631,7 @@ vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_mu(
@@ -2640,7 +2640,7 @@ vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_mu(
@@ -2649,7 +2649,7 @@ vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_mu(
@@ -2658,7 +2658,7 @@ vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_mu(
@@ -2667,7 +2667,7 @@ vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_mu(
@@ -2676,7 +2676,7 @@ vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_mu(
@@ -2685,7 +2685,7 @@ vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_mu(
@@ -2694,7 +2694,7 @@ vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_mu(
@@ -2703,7 +2703,7 @@ vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_mu(
@@ -2712,7 +2712,7 @@ vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_mu(
@@ -2721,7 +2721,7 @@ vint64m1_t test_vxor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_mu(
@@ -2730,7 +2730,7 @@ vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_mu(
@@ -2739,7 +2739,7 @@ vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_mu(
@@ -2748,7 +2748,7 @@ vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_mu(
@@ -2757,7 +2757,7 @@ vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_mu(
@@ -2766,7 +2766,7 @@ vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_mu(
@@ -2775,7 +2775,7 @@ vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_mu(
@@ -2784,7 +2784,7 @@ vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_mu(
@@ -2793,7 +2793,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_mu(
@@ -2802,7 +2802,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_mu(
@@ -2811,7 +2811,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_mu(
@@ -2820,7 +2820,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_mu(
@@ -2829,7 +2829,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_mu(
@@ -2838,7 +2838,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_mu(
@@ -2847,7 +2847,7 @@ vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_mu(
@@ -2856,7 +2856,7 @@ vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_mu(
@@ -2865,7 +2865,7 @@ vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_mu(
@@ -2874,7 +2874,7 @@ vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_mu(
@@ -2883,7 +2883,7 @@ vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_mu(
@@ -2892,7 +2892,7 @@ vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_mu(
@@ -2901,7 +2901,7 @@ vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_mu(
@@ -2910,7 +2910,7 @@ vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_mu(
@@ -2919,7 +2919,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_mu(
@@ -2928,7 +2928,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_mu(
@@ -2937,7 +2937,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_mu(
@@ -2946,7 +2946,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_mu(
@@ -2955,7 +2955,7 @@ vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_mu(
@@ -2964,7 +2964,7 @@ vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_mu(
@@ -2973,7 +2973,7 @@ vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_mu(
@@ -2982,7 +2982,7 @@ vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_mu(
@@ -2991,7 +2991,7 @@ vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_mu(
@@ -3000,7 +3000,7 @@ vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_mu(
@@ -3009,7 +3009,7 @@ vuint16m8_t test_vxor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_mu(
@@ -3018,7 +3018,7 @@ vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_mu(
@@ -3027,7 +3027,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vxor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_mu(
@@ -3036,7 +3036,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_mu(
@@ -3045,7 +3045,7 @@ vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_mu(
@@ -3054,7 +3054,7 @@ vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_mu(
@@ -3063,7 +3063,7 @@ vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_mu(
@@ -3072,7 +3072,7 @@ vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_mu(
@@ -3081,7 +3081,7 @@ vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_mu(
@@ -3090,7 +3090,7 @@ vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_mu(
@@ -3099,7 +3099,7 @@ vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vxor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_mu(
@@ -3108,7 +3108,7 @@ vuint32m8_t test_vxor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_mu(
@@ -3117,7 +3117,7 @@ vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vxor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_mu(
@@ -3126,7 +3126,7 @@ vuint64m1_t test_vxor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_mu(
@@ -3135,7 +3135,7 @@ vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_mu(
@@ -3144,7 +3144,7 @@ vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_mu(
@@ -3153,7 +3153,7 @@ vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_mu(
@@ -3162,7 +3162,7 @@ vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_mu(
@@ -3171,6 +3171,6 @@ vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vxor_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
}
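The hunks above mechanically rename the overloaded masked-undisturbed (`_mu`) forms of vxor. A minimal caller-side sketch of the renamed API follows — not part of the patch, and it assumes the full three-commit patch-set is applied and the file is compiled with something like `-march=rv64gcv`:

    #include <riscv_vector.h>

    // Lanes where `mask` is set receive op1 ^ op2; masked-off lanes keep
    // the value from `maskedoff` (the "mu" = mask-undisturbed policy).
    // vbool32_t is the mask type matching SEW=32, LMUL=1.
    vuint32m1_t xor_masked_lanes(vbool32_t mask, vuint32m1_t maskedoff,
                                 vuint32m1_t op1, vuint32m1_t op2,
                                 size_t vl) {
      return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); // was: vxor_mu(...)
    }

Because the intrinsic is overloaded, the call resolves on the argument types; only the `__riscv_` prefix changes at the call site.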
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext.c
index ae1f60954b8c..5f7b12175cb6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_tu(
@@ -21,7 +21,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_tu(
@@ -30,7 +30,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_tu(
@@ -39,7 +39,7 @@ vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vzext_vf2_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_tu(
@@ -48,7 +48,7 @@ vuint16m2_t test_vzext_vf2_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vzext_vf2_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_tu(
@@ -57,7 +57,7 @@ vuint16m4_t test_vzext_vf2_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vzext_vf2_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tu(
@@ -66,7 +66,7 @@ vuint16m8_t test_vzext_vf2_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tu(
@@ -75,7 +75,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, s
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tu(
@@ -84,7 +84,7 @@ vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t maskedoff, vuint8mf4_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tu(
@@ -93,7 +93,7 @@ vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tu(
@@ -102,7 +102,7 @@ vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vzext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tu(
@@ -111,7 +111,7 @@ vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf8_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tu(
@@ -120,7 +120,7 @@ vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf8_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tu(
@@ -129,7 +129,7 @@ vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf8_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tu(
@@ -138,7 +138,7 @@ vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf8_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tu(
@@ -147,7 +147,7 @@ vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_tu(
@@ -156,7 +156,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_tu(
@@ -165,7 +165,7 @@ vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_tu(
@@ -174,7 +174,7 @@ vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_tu(
@@ -183,7 +183,7 @@ vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tu(
@@ -192,7 +192,7 @@ vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vzext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tu(
@@ -201,7 +201,7 @@ vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vzext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tu(
@@ -210,7 +210,7 @@ vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vzext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tu(
@@ -219,7 +219,7 @@ vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vzext_vf4_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tu(
@@ -228,7 +228,7 @@ vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_tu(
@@ -237,7 +237,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_tu(
@@ -246,7 +246,7 @@ vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_tu(
@@ -255,7 +255,7 @@ vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) {
- return vzext_vf2_tu(maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tu(maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_tum(
@@ -264,7 +264,7 @@ vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_tum(
@@ -273,7 +273,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_tum(
@@ -282,7 +282,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_tum(
@@ -291,7 +291,7 @@ vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_tum(
@@ -300,7 +300,7 @@ vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_tum(
@@ -309,7 +309,7 @@ vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tum(
@@ -318,7 +318,7 @@ vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tum(
@@ -327,7 +327,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tum(
@@ -336,7 +336,7 @@ vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tum(
@@ -345,7 +345,7 @@ vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tum(
@@ -354,7 +354,7 @@ vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vzext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tum(
@@ -363,7 +363,7 @@ vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf8_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tum(
@@ -372,7 +372,7 @@ vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf8_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tum(
@@ -381,7 +381,7 @@ vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf8_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tum(
@@ -390,7 +390,7 @@ vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf8_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tum(
@@ -399,7 +399,7 @@ vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf2_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_tum(
@@ -408,7 +408,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_tum(
@@ -417,7 +417,7 @@ vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_tum(
@@ -426,7 +426,7 @@ vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_tum(
@@ -435,7 +435,7 @@ vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tum(
@@ -444,7 +444,7 @@ vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vzext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tum(
@@ -453,7 +453,7 @@ vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vzext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tum(
@@ -462,7 +462,7 @@ vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vzext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tum(
@@ -471,7 +471,7 @@ vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vzext_vf4_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tum(
@@ -480,7 +480,7 @@ vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_tum(
@@ -489,7 +489,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_tum(
@@ -498,7 +498,7 @@ vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_tum(
@@ -507,7 +507,7 @@ vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) {
- return vzext_vf2_tum(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_tumu(
@@ -516,7 +516,7 @@ vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_tumu(
@@ -525,7 +525,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_tumu(
@@ -534,7 +534,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_tumu(
@@ -543,7 +543,7 @@ vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_tumu(
@@ -552,7 +552,7 @@ vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_tumu(
@@ -561,7 +561,7 @@ vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tumu(
@@ -570,7 +570,7 @@ vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tumu(
@@ -579,7 +579,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tumu(
@@ -588,7 +588,7 @@ vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tumu(
@@ -597,7 +597,7 @@ vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tumu(
@@ -606,7 +606,7 @@ vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vzext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tumu(
@@ -615,7 +615,7 @@ vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf8_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tumu(
@@ -624,7 +624,7 @@ vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf8_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tumu(
@@ -633,7 +633,7 @@ vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf8_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tumu(
@@ -642,7 +642,7 @@ vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf8_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tumu(
@@ -651,7 +651,7 @@ vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf2_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_tumu(
@@ -660,7 +660,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_tumu(
@@ -669,7 +669,7 @@ vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_tumu(
@@ -678,7 +678,7 @@ vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_tumu(
@@ -687,7 +687,7 @@ vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tumu(
@@ -696,7 +696,7 @@ vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vzext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tumu(
@@ -705,7 +705,7 @@ vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vzext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tumu(
@@ -714,7 +714,7 @@ vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vzext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tumu(
@@ -723,7 +723,7 @@ vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vzext_vf4_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tumu(
@@ -732,7 +732,7 @@ vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_tumu(
@@ -741,7 +741,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf2_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_tumu(
@@ -750,7 +750,7 @@ vuint64m2_t test_vzext_vf2_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_tumu(
@@ -759,7 +759,7 @@ vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) {
- return vzext_vf2_tumu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_mu(
@@ -768,7 +768,7 @@ vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_mu(
@@ -777,7 +777,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_mu(
@@ -786,7 +786,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_mu(
@@ -795,7 +795,7 @@ vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_mu(
@@ -804,7 +804,7 @@ vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_mu(
@@ -813,7 +813,7 @@ vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_mu(
@@ -822,7 +822,7 @@ vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_mu(
@@ -831,7 +831,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_mu(
@@ -840,7 +840,7 @@ vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_mu(
@@ -849,7 +849,7 @@ vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_mu(
@@ -858,7 +858,7 @@ vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vzext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mu(
@@ -867,7 +867,7 @@ vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vzext_vf8_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_mu(
@@ -876,7 +876,7 @@ vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vzext_vf8_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mu(
@@ -885,7 +885,7 @@ vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vzext_vf8_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mu(
@@ -894,7 +894,7 @@ vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vzext_vf8_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_mu(
@@ -903,7 +903,7 @@ vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_mu(
@@ -912,7 +912,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_mu(
@@ -921,7 +921,7 @@ vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_mu(
@@ -930,7 +930,7 @@ vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_mu(
@@ -939,7 +939,7 @@ vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_mu(
@@ -948,7 +948,7 @@ vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vzext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_mu(
@@ -957,7 +957,7 @@ vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vzext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_mu(
@@ -966,7 +966,7 @@ vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vzext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_mu(
@@ -975,7 +975,7 @@ vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vzext_vf4_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_mu(
@@ -984,7 +984,7 @@ vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint1
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_mu(
@@ -993,7 +993,7 @@ vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_mu(
@@ -1002,7 +1002,7 @@ vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_mu(
@@ -1011,6 +1011,6 @@ vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf2_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) {
- return vzext_vf2_mu(mask, maskedoff, op1, vl);
+ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
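
For reference, a minimal standalone sketch (not part of the diff) of how a caller uses the renamed mask-undisturbed zero-extend overload. It assumes <riscv_vector.h> and a toolchain with the V extension enabled, and mirrors the argument types of the tests above; the function name is made up for illustration.

#include <riscv_vector.h>

// Widen u32 elements to u64 under a mask; inactive elements keep
// maskedoff's values (the _mu, mask-undisturbed, policy).
vuint64m2_t widen_u32_to_u64(vbool32_t mask, vuint64m2_t maskedoff,
                             vuint32m1_t op1, size_t vl) {
  return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl);
}
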
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-overloaded.c
index 1875baef68a6..90af56a153a2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-overloaded.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-overloaded.c
@@ -11,7 +11,7 @@
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
- return vget_i8m1(src, 0);
+ return __riscv_vget_i8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
@@ -20,7 +20,7 @@ vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
- return vget_i8m1(src, 0);
+ return __riscv_vget_i8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
@@ -29,7 +29,7 @@ vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
- return vget_i8m1(src, 0);
+ return __riscv_vget_i8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
@@ -38,7 +38,7 @@ vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
- return vget_i8m2(src, 0);
+ return __riscv_vget_i8m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
@@ -47,7 +47,7 @@ vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
- return vget_i8m2(src, 0);
+ return __riscv_vget_i8m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
@@ -56,7 +56,7 @@ vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) {
- return vget_i8m4(src, 0);
+ return __riscv_vget_i8m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1(
@@ -65,7 +65,7 @@ vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) {
- return vget_u8m1(src, 0);
+ return __riscv_vget_u8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1(
@@ -74,7 +74,7 @@ vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) {
- return vget_u8m1(src, 0);
+ return __riscv_vget_u8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1(
@@ -83,7 +83,7 @@ vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) {
- return vget_u8m1(src, 0);
+ return __riscv_vget_u8m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2(
@@ -92,7 +92,7 @@ vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) {
- return vget_u8m2(src, 0);
+ return __riscv_vget_u8m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2(
@@ -101,7 +101,7 @@ vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) {
- return vget_u8m2(src, 0);
+ return __riscv_vget_u8m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4(
@@ -110,7 +110,7 @@ vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) {
- return vget_u8m4(src, 0);
+ return __riscv_vget_u8m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1(
@@ -119,7 +119,7 @@ vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) {
- return vget_i16m1(src, 0);
+ return __riscv_vget_i16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1(
@@ -128,7 +128,7 @@ vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) {
- return vget_i16m1(src, 0);
+ return __riscv_vget_i16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1(
@@ -137,7 +137,7 @@ vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) {
- return vget_i16m1(src, 0);
+ return __riscv_vget_i16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2(
@@ -146,7 +146,7 @@ vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) {
- return vget_i16m2(src, 0);
+ return __riscv_vget_i16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2(
@@ -155,7 +155,7 @@ vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) {
- return vget_i16m2(src, 0);
+ return __riscv_vget_i16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4(
@@ -164,7 +164,7 @@ vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) {
- return vget_i16m4(src, 0);
+ return __riscv_vget_i16m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1(
@@ -173,7 +173,7 @@ vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) {
- return vget_u16m1(src, 0);
+ return __riscv_vget_u16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1(
@@ -182,7 +182,7 @@ vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) {
- return vget_u16m1(src, 0);
+ return __riscv_vget_u16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1(
@@ -191,7 +191,7 @@ vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) {
- return vget_u16m1(src, 0);
+ return __riscv_vget_u16m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2(
@@ -200,7 +200,7 @@ vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) {
- return vget_u16m2(src, 0);
+ return __riscv_vget_u16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2(
@@ -209,7 +209,7 @@ vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) {
- return vget_u16m2(src, 0);
+ return __riscv_vget_u16m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4(
@@ -218,7 +218,7 @@ vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) {
- return vget_u16m4(src, 0);
+ return __riscv_vget_u16m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1(
@@ -227,7 +227,7 @@ vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) {
- return vget_i32m1(src, 0);
+ return __riscv_vget_i32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1(
@@ -236,7 +236,7 @@ vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) {
- return vget_i32m1(src, 0);
+ return __riscv_vget_i32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1(
@@ -245,7 +245,7 @@ vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) {
- return vget_i32m1(src, 0);
+ return __riscv_vget_i32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2(
@@ -254,7 +254,7 @@ vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) {
- return vget_i32m2(src, 0);
+ return __riscv_vget_i32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2(
@@ -263,7 +263,7 @@ vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) {
- return vget_i32m2(src, 0);
+ return __riscv_vget_i32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4(
@@ -272,7 +272,7 @@ vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) {
- return vget_i32m4(src, 0);
+ return __riscv_vget_i32m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1(
@@ -281,7 +281,7 @@ vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) {
- return vget_u32m1(src, 0);
+ return __riscv_vget_u32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1(
@@ -290,7 +290,7 @@ vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) {
- return vget_u32m1(src, 0);
+ return __riscv_vget_u32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1(
@@ -299,7 +299,7 @@ vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) {
- return vget_u32m1(src, 0);
+ return __riscv_vget_u32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2(
@@ -308,7 +308,7 @@ vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) {
- return vget_u32m2(src, 0);
+ return __riscv_vget_u32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2(
@@ -317,7 +317,7 @@ vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) {
- return vget_u32m2(src, 0);
+ return __riscv_vget_u32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4(
@@ -326,7 +326,7 @@ vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) {
- return vget_u32m4(src, 0);
+ return __riscv_vget_u32m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1(
@@ -335,7 +335,7 @@ vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) {
- return vget_f32m1(src, 0);
+ return __riscv_vget_f32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1(
@@ -344,7 +344,7 @@ vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) {
- return vget_f32m1(src, 0);
+ return __riscv_vget_f32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1(
@@ -353,7 +353,7 @@ vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) {
- return vget_f32m1(src, 0);
+ return __riscv_vget_f32m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2(
@@ -362,7 +362,7 @@ vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) {
- return vget_f32m2(src, 0);
+ return __riscv_vget_f32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2(
@@ -371,7 +371,7 @@ vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) {
- return vget_f32m2(src, 0);
+ return __riscv_vget_f32m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4(
@@ -380,7 +380,7 @@ vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) {
- return vget_f32m4(src, 0);
+ return __riscv_vget_f32m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1(
@@ -389,7 +389,7 @@ vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) {
- return vget_i64m1(src, 0);
+ return __riscv_vget_i64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1(
@@ -398,7 +398,7 @@ vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) {
- return vget_i64m1(src, 0);
+ return __riscv_vget_i64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1(
@@ -407,7 +407,7 @@ vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) {
- return vget_i64m1(src, 0);
+ return __riscv_vget_i64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2(
@@ -416,7 +416,7 @@ vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) {
- return vget_i64m2(src, 0);
+ return __riscv_vget_i64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2(
@@ -425,7 +425,7 @@ vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) {
- return vget_i64m2(src, 0);
+ return __riscv_vget_i64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4(
@@ -434,7 +434,7 @@ vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) {
- return vget_i64m4(src, 0);
+ return __riscv_vget_i64m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1(
@@ -443,7 +443,7 @@ vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) {
- return vget_u64m1(src, 0);
+ return __riscv_vget_u64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1(
@@ -452,7 +452,7 @@ vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) {
- return vget_u64m1(src, 0);
+ return __riscv_vget_u64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1(
@@ -461,7 +461,7 @@ vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) {
- return vget_u64m1(src, 0);
+ return __riscv_vget_u64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2(
@@ -470,7 +470,7 @@ vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) {
- return vget_u64m2(src, 0);
+ return __riscv_vget_u64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2(
@@ -479,7 +479,7 @@ vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) {
- return vget_u64m2(src, 0);
+ return __riscv_vget_u64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4(
@@ -488,7 +488,7 @@ vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
- return vget_u64m4(src, 0);
+ return __riscv_vget_u64m4(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1(
@@ -497,7 +497,7 @@ vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) {
- return vget_f64m1(src, 0);
+ return __riscv_vget_f64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1(
@@ -506,7 +506,7 @@ vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) {
- return vget_f64m1(src, 0);
+ return __riscv_vget_f64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1(
@@ -515,7 +515,7 @@ vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) {
- return vget_f64m1(src, 0);
+ return __riscv_vget_f64m1(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2(
@@ -524,7 +524,7 @@ vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) {
- return vget_f64m2(src, 0);
+ return __riscv_vget_f64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2(
@@ -533,7 +533,7 @@ vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) {
- return vget_f64m2(src, 0);
+ return __riscv_vget_f64m2(src, 0);
}
// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4(
@@ -542,6 +542,6 @@ vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) {
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) {
- return vget_f64m4(src, 0);
+ return __riscv_vget_f64m4(src, 0);
}
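
A hedged usage sketch of the renamed vget overload (not part of the diff; assumes <riscv_vector.h>, helper name hypothetical): it extracts one LMUL=1 slice from an LMUL=2 register group. As in the tests above, the slice index must be a compile-time constant; the tests pass 0.

#include <riscv_vector.h>

// Pull the low m1 part out of an m2 register group.
vint32m1_t low_half(vint32m2_t src) {
  return __riscv_vget_i32m1(src, 0);
}
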
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vlenb.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vlenb.c
index 999e1accbed2..b65e657652fc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vlenb.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vlenb.c
@@ -18,7 +18,7 @@
// RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vlenb(void) {
- return vlenb();
+ return __riscv_vlenb();
}
//.
// RV32: attributes #0 = { mustprogress nofree noinline nosync nounwind willreturn memory(read) vscale_range(2,1024) "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+32bit,+d,+f,+v,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl128b,+zvl32b,+zvl64b" }
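
For illustration, a standalone sketch of the renamed vlenb call (not from the diff; assumes <riscv_vector.h> and the function name is invented): it reads the vector register length in bytes, so VLEN in bits is 8 times the returned value.

#include <riscv_vector.h>

// Number of bytes in one vector register (VLEN / 8).
unsigned long vector_register_bytes(void) {
  return __riscv_vlenb();
}
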
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64-overloaded.c
index d48ffd0704a2..758d8aa73ab1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64-overloaded.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64-overloaded.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
@@ -22,7 +22,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
@@ -31,7 +31,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
@@ -40,7 +40,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
@@ -49,7 +49,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
@@ -58,7 +58,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
@@ -67,7 +67,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
@@ -76,7 +76,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
@@ -85,7 +85,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
@@ -94,7 +94,7 @@ vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
@@ -103,7 +103,7 @@ vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
@@ -112,7 +112,7 @@ vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
@@ -121,7 +121,7 @@ vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
@@ -130,7 +130,7 @@ vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
@@ -139,7 +139,7 @@ vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
@@ -148,5 +148,5 @@ vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
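
A minimal sketch of the masked form after the rename (not part of the diff; assumes <riscv_vector.h>, and note these EEW=64 vmulh tests live in a separate handcrafted file because the high-half multiplies at EEW=64 come with the full V extension rather than the Zve64 subsets): passing a leading mask argument selects the masked overload, as in the _m tests above.

#include <riscv_vector.h>

// High 64 bits of a signed 64x64 multiply, computed only where mask is set.
vint64m1_t mulh_masked(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
                       size_t vl) {
  return __riscv_vmulh(mask, op1, op2, vl);
}
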
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-overloaded.c
index 405e9104f1b5..b07825a831f2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-overloaded.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-overloaded.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return vmulh(op1, op2, vl);
+ return __riscv_vmulh(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
@@ -336,7 +336,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
@@ -345,7 +345,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
@@ -354,7 +354,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
@@ -363,7 +363,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
@@ -372,7 +372,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
@@ -381,7 +381,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
@@ -390,7 +390,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
@@ -399,7 +399,7 @@ vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
@@ -408,7 +408,7 @@ vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
@@ -417,7 +417,7 @@ vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
@@ -426,7 +426,7 @@ vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
@@ -435,7 +435,7 @@ vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
@@ -444,7 +444,7 @@ vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
@@ -453,7 +453,7 @@ vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
@@ -462,7 +462,7 @@ vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
@@ -471,7 +471,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
@@ -480,7 +480,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
@@ -489,7 +489,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
@@ -498,7 +498,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
@@ -507,7 +507,7 @@ vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
@@ -516,7 +516,7 @@ vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
@@ -525,7 +525,7 @@ vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
@@ -534,7 +534,7 @@ vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
@@ -543,7 +543,7 @@ vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
@@ -552,7 +552,7 @@ vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
@@ -561,7 +561,7 @@ vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
@@ -570,7 +570,7 @@ vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
@@ -579,7 +579,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
@@ -588,7 +588,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
@@ -597,7 +597,7 @@ vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
@@ -606,7 +606,7 @@ vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
@@ -615,7 +615,7 @@ vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
@@ -624,7 +624,7 @@ vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
@@ -633,7 +633,7 @@ vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
@@ -642,7 +642,7 @@ vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
@@ -651,5 +651,5 @@ vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmulh(mask, op1, op2, vl);
+ return __riscv_vmulh(mask, op1, op2, vl);
}
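
A sketch (not part of the diff, assuming <riscv_vector.h> and hypothetical wrapper names) of how one prefixed name covers both operand shapes: the same __riscv_vmulh spelling resolves to the vector-vector or vector-scalar form from the argument types, exactly what the vv/vx test pairs above exercise.

#include <riscv_vector.h>

// Same overloaded name, two operand shapes.
vint32m1_t mulh_vv(vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vmulh(a, b, vl);  // vector-vector form
}

vint32m1_t mulh_vx(vint32m1_t a, int32_t b, size_t vl) {
  return __riscv_vmulh(a, b, vl);  // vector-scalar form
}
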
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64-overloaded.c
index c0a2080e6d54..acd1820f2e0b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64-overloaded.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64-overloaded.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
@@ -22,7 +22,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
@@ -31,7 +31,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
@@ -40,7 +40,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
@@ -49,7 +49,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
@@ -58,7 +58,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
@@ -67,7 +67,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
@@ -76,7 +76,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
@@ -85,7 +85,7 @@ vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
@@ -94,7 +94,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
@@ -103,7 +103,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
@@ -112,7 +112,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
@@ -121,7 +121,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
@@ -130,7 +130,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
@@ -139,7 +139,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
@@ -148,5 +148,5 @@ vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-overloaded.c
index 481c96039f9e..396611b766b5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-overloaded.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-overloaded.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
@@ -21,7 +21,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
@@ -30,7 +30,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
@@ -39,7 +39,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
@@ -48,7 +48,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
@@ -57,7 +57,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
@@ -66,7 +66,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
@@ -75,7 +75,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
@@ -84,7 +84,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
@@ -93,7 +93,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
@@ -102,7 +102,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
@@ -111,7 +111,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
@@ -120,7 +120,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
@@ -129,7 +129,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
@@ -138,7 +138,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
@@ -147,7 +147,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
@@ -156,7 +156,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
@@ -165,7 +165,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
@@ -174,7 +174,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
@@ -183,7 +183,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
@@ -192,7 +192,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
@@ -201,7 +201,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
@@ -210,7 +210,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
@@ -219,7 +219,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
@@ -228,7 +228,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
@@ -237,7 +237,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
@@ -246,7 +246,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
@@ -255,7 +255,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
@@ -264,7 +264,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
@@ -273,7 +273,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
@@ -282,7 +282,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
@@ -291,7 +291,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
@@ -300,7 +300,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
@@ -309,7 +309,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
@@ -318,7 +318,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
@@ -327,7 +327,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(op1, op2, vl);
+ return __riscv_vmulhsu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
@@ -336,7 +336,7 @@ vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m(
@@ -345,7 +345,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
@@ -354,7 +354,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
@@ -363,7 +363,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
@@ -372,7 +372,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
@@ -381,7 +381,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
@@ -390,7 +390,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
@@ -399,7 +399,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
@@ -408,7 +408,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
@@ -417,7 +417,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
@@ -426,7 +426,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
@@ -435,7 +435,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
@@ -444,7 +444,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
@@ -453,7 +453,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
@@ -462,7 +462,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
@@ -471,7 +471,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
@@ -480,7 +480,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
@@ -489,7 +489,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
@@ -498,7 +498,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
@@ -507,7 +507,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
@@ -516,7 +516,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
@@ -525,7 +525,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
@@ -534,7 +534,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
@@ -543,7 +543,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
@@ -552,7 +552,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
@@ -561,7 +561,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m(
@@ -570,7 +570,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
@@ -579,7 +579,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
@@ -588,7 +588,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
@@ -597,7 +597,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t o
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
@@ -606,7 +606,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
@@ -615,7 +615,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t o
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
@@ -624,7 +624,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
@@ -633,7 +633,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
@@ -642,7 +642,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
@@ -651,5 +651,5 @@ vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhsu(mask, op1, op2, vl);
+ return __riscv_vmulhsu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64-overloaded.c
index aeba7d62a5ab..e69eebf9a18b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64-overloaded.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64-overloaded.c
@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
@@ -22,7 +22,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
@@ -31,7 +31,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
@@ -40,7 +40,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
@@ -49,7 +49,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
@@ -58,7 +58,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
@@ -67,7 +67,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
@@ -76,7 +76,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
@@ -85,7 +85,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
@@ -94,7 +94,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
@@ -103,7 +103,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
@@ -112,7 +112,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
@@ -121,7 +121,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
@@ -130,7 +130,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
@@ -139,7 +139,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
@@ -148,5 +148,5 @@ vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-overloaded.c
index 2384b2fae718..46cdbca19dba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-overloaded.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-overloaded.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
@@ -21,7 +21,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
@@ -30,7 +30,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
@@ -39,7 +39,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
@@ -48,7 +48,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
@@ -57,7 +57,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
@@ -66,7 +66,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
@@ -75,7 +75,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
@@ -84,7 +84,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
@@ -93,7 +93,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
@@ -102,7 +102,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
@@ -111,7 +111,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
@@ -120,7 +120,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
@@ -129,7 +129,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
@@ -138,7 +138,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
@@ -147,7 +147,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
@@ -156,7 +156,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
@@ -165,7 +165,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
@@ -174,7 +174,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
@@ -183,7 +183,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
@@ -192,7 +192,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
@@ -201,7 +201,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
@@ -210,7 +210,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
@@ -219,7 +219,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
@@ -228,7 +228,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
@@ -237,7 +237,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
@@ -246,7 +246,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
@@ -255,7 +255,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
@@ -264,7 +264,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
@@ -273,7 +273,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
@@ -282,7 +282,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
@@ -291,7 +291,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
@@ -300,7 +300,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
@@ -309,7 +309,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
@@ -318,7 +318,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
@@ -327,7 +327,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhu(op1, op2, vl);
+ return __riscv_vmulhu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
@@ -336,7 +336,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m(
@@ -345,7 +345,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
@@ -354,7 +354,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
@@ -363,7 +363,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
@@ -372,7 +372,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
@@ -381,7 +381,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
@@ -390,7 +390,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
@@ -399,7 +399,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
@@ -408,7 +408,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
@@ -417,7 +417,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
@@ -426,7 +426,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
@@ -435,7 +435,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
@@ -444,7 +444,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
@@ -453,7 +453,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m(
@@ -462,7 +462,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m(
@@ -471,7 +471,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
@@ -480,7 +480,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
@@ -489,7 +489,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
@@ -498,7 +498,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
@@ -507,7 +507,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
@@ -516,7 +516,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
@@ -525,7 +525,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
@@ -534,7 +534,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
@@ -543,7 +543,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
@@ -552,7 +552,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
@@ -561,7 +561,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m(
@@ -570,7 +570,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m(
@@ -579,7 +579,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
@@ -588,7 +588,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
@@ -597,7 +597,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
@@ -606,7 +606,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
@@ -615,7 +615,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
@@ -624,7 +624,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
@@ -633,7 +633,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
@@ -642,7 +642,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
@@ -651,5 +651,5 @@ vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vmulhu(mask, op1, op2, vl);
+ return __riscv_vmulhu(mask, op1, op2, vl);
}
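Note: the hunks above are a pure rename of the overloaded call sites; the masked vmulhu overload keeps the (mask, op1, op2, vl) signature the tests exercise. A minimal caller sketch, assuming a toolchain that ships the renamed overloads in <riscv_vector.h> and is invoked with -march=rv64gcv (the function below and its predicate layout are illustrative, not part of this patch):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// High halves of unsigned 32x32-bit products, computed only where
// pred[i] != 0; masked-off destination lanes are never stored.
void mulhi_u32_masked(const uint32_t *a, const uint32_t *b,
                      const uint8_t *pred, uint32_t *out, size_t n) {
  for (size_t vl; n > 0; n -= vl, a += vl, b += vl, pred += vl, out += vl) {
    vl = __riscv_vsetvl_e32m4(n);
    vuint32m4_t va = __riscv_vle32_v_u32m4(a, vl);
    vuint32m4_t vb = __riscv_vle32_v_u32m4(b, vl);
    vuint8m1_t vp = __riscv_vle8_v_u8m1(pred, vl);
    vbool8_t mask = __riscv_vmsne(vp, 0, vl);          // renamed overloaded compare
    vuint32m4_t hi = __riscv_vmulhu(mask, va, vb, vl); // renamed masked overload
    __riscv_vse32(mask, out, hi, vl);                  // renamed masked store
  }
}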
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vset-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vset-overloaded.c
index 683dea0ef537..5d8fd675a985 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vset-overloaded.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vset-overloaded.c
@@ -11,7 +11,7 @@
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
@@ -20,7 +20,7 @@ vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
@@ -29,7 +29,7 @@ vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) {
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
@@ -38,7 +38,7 @@ vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
@@ -47,7 +47,7 @@ vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
@@ -56,7 +56,7 @@ vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) {
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2(
@@ -65,7 +65,7 @@ vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) {
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4(
@@ -74,7 +74,7 @@ vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4(
@@ -83,7 +83,7 @@ vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8(
@@ -92,7 +92,7 @@ vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8(
@@ -101,7 +101,7 @@ vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8(
@@ -110,7 +110,7 @@ vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
@@ -119,7 +119,7 @@ vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4(
@@ -128,7 +128,7 @@ vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4(
@@ -137,7 +137,7 @@ vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8(
@@ -146,7 +146,7 @@ vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8(
@@ -155,7 +155,7 @@ vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8(
@@ -164,7 +164,7 @@ vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2(
@@ -173,7 +173,7 @@ vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4(
@@ -182,7 +182,7 @@ vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4(
@@ -191,7 +191,7 @@ vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8(
@@ -200,7 +200,7 @@ vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8(
@@ -209,7 +209,7 @@ vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8(
@@ -218,7 +218,7 @@ vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2(
@@ -227,7 +227,7 @@ vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4(
@@ -236,7 +236,7 @@ vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4(
@@ -245,7 +245,7 @@ vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8(
@@ -254,7 +254,7 @@ vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8(
@@ -263,7 +263,7 @@ vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8(
@@ -272,7 +272,7 @@ vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2(
@@ -281,7 +281,7 @@ vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4(
@@ -290,7 +290,7 @@ vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4(
@@ -299,7 +299,7 @@ vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8(
@@ -308,7 +308,7 @@ vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8(
@@ -317,7 +317,7 @@ vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8(
@@ -326,7 +326,7 @@ vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
@@ -335,7 +335,7 @@ vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
@@ -344,7 +344,7 @@ vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
@@ -353,7 +353,7 @@ vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
@@ -362,7 +362,7 @@ vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
@@ -371,7 +371,7 @@ vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
@@ -380,7 +380,7 @@ vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2(
@@ -389,7 +389,7 @@ vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4(
@@ -398,7 +398,7 @@ vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4(
@@ -407,7 +407,7 @@ vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8(
@@ -416,7 +416,7 @@ vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8(
@@ -425,7 +425,7 @@ vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8(
@@ -434,7 +434,7 @@ vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
@@ -443,7 +443,7 @@ vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
@@ -452,7 +452,7 @@ vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
@@ -461,7 +461,7 @@ vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
@@ -470,7 +470,7 @@ vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
@@ -479,7 +479,7 @@ vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
@@ -488,7 +488,7 @@ vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
@@ -497,7 +497,7 @@ vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
@@ -506,7 +506,7 @@ vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
@@ -515,7 +515,7 @@ vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
@@ -524,7 +524,7 @@ vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
@@ -533,7 +533,7 @@ vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
@@ -542,5 +542,5 @@ vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) {
- return vset(dest, 0, val);
+ return __riscv_vset(dest, 0, val);
}
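Note: vset is one of the overloads whose part-index operand must be a constant expression, which is why the autogenerated tests above pass a literal 0 rather than the unused index parameter. A sketch of the intended use, building an LMUL=4 register group out of LMUL=2 parts (the names below are ours, not from the patch, and the sketch assumes the vundefined intrinsics are available under the same prefix):

#include <riscv_vector.h>

// Pack two m2 halves into one m4 register group with the renamed
// overload; valid part indices for m2 parts of an m4 group are 0 and 1.
vint32m4_t pack_two_halves(vint32m2_t lo, vint32m2_t hi) {
  vint32m4_t grp = __riscv_vundefined_i32m4(); // assumed starting point
  grp = __riscv_vset(grp, 0, lo);
  grp = __riscv_vset(grp, 1, hi);
  return grp;
}

// The inverse direction cannot be overloaded on the return type alone,
// so the extraction overload keeps an explicit type suffix.
vint32m2_t upper_half(vint32m4_t grp) {
  return __riscv_vget_i32m2(grp, 1);
}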
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c
index 35560ebfcd48..73a8fe4ca058 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
@@ -21,7 +21,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
@@ -30,7 +30,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
@@ -39,7 +39,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
@@ -48,7 +48,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
@@ -57,7 +57,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
@@ -66,7 +66,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
@@ -75,7 +75,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
@@ -84,7 +84,7 @@ vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
@@ -93,7 +93,7 @@ vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
@@ -102,7 +102,7 @@ vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
@@ -111,7 +111,7 @@ vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
@@ -120,7 +120,7 @@ vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
@@ -129,7 +129,7 @@ vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
@@ -138,7 +138,7 @@ vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
@@ -147,5 +147,5 @@ vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, vl);
}
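Note: vsmul gets a handcrafted EEW=64 test because the 64-bit forms of the high-half and saturating multiplies require the full V extension rather than Zve64*. The renamed overload keeps the signature used above, with rounding behavior taken from the vxrm CSR. A minimal unmasked Q63 fixed-point caller sketch under those assumptions (names ours):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// out[i] = saturating (a[i] * b[i]) >> 63, rounded per vxrm.
void q63_mul(const int64_t *a, const int64_t *b, int64_t *out, size_t n) {
  for (size_t vl; n > 0; n -= vl, a += vl, b += vl, out += vl) {
    vl = __riscv_vsetvl_e64m8(n);
    vint64m8_t va = __riscv_vle64_v_i64m8(a, vl);
    vint64m8_t vb = __riscv_vle64_v_i64m8(b, vl);
    vint64m8_t p = __riscv_vsmul(va, vb, vl); // renamed unmasked overload
    __riscv_vse64(out, p, vl);
  }
}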